From f5980566ae13e04a6a03dac48dcfe78d9c404bc0 Mon Sep 17 00:00:00 2001 From: Esther Kleinhenz Date: Mon, 22 Oct 2018 18:58:10 +0200 Subject: [PATCH] added celery and tasks but most important added tag_list --- application/__init__.py | 5 + application/admin.py | 42 + application/celeryapp.py | 13 + application/email_service.py | 57 + application/forms.py | 32 + .../migrations/0006_auto_20181021_1347.py | 51 + application/models.py | 39 + application/tasks.py | 7 + application/templates/tag_list.html | 35 +- application/views.py | 12 +- croniter | 1 + log.txt | 177 ++ mysite/settings.py | 16 +- thesisenv/bin/celery | 11 + thesisenv/bin/celerybeat | 11 + thesisenv/bin/celeryd | 11 + thesisenv/bin/celeryd-multi | 11 + .../amqp-1.4.9.dist-info/DESCRIPTION.rst | 106 + .../INSTALLER | 0 .../amqp-1.4.9.dist-info/METADATA | 130 + .../site-packages/amqp-1.4.9.dist-info/RECORD | 37 + .../site-packages/amqp-1.4.9.dist-info/WHEEL | 6 + .../amqp-1.4.9.dist-info/metadata.json | 1 + .../amqp-1.4.9.dist-info/top_level.txt | 1 + .../python3.6/site-packages/amqp/__init__.py | 70 + .../site-packages/amqp/abstract_channel.py | 93 + .../site-packages/amqp/basic_message.py | 124 + .../python3.6/site-packages/amqp/channel.py | 2550 +++++++++++++++++ .../site-packages/amqp/connection.py | 1008 +++++++ .../site-packages/amqp/exceptions.py | 262 ++ .../lib/python3.6/site-packages/amqp/five.py | 191 ++ .../site-packages/amqp/method_framing.py | 231 ++ .../python3.6/site-packages/amqp/protocol.py | 13 + .../site-packages/amqp/serialization.py | 509 ++++ .../site-packages/amqp/tests/__init__.py | 0 .../site-packages/amqp/tests/case.py | 85 + .../site-packages/amqp/tests/test_channel.py | 35 + .../python3.6/site-packages/amqp/transport.py | 299 ++ .../lib/python3.6/site-packages/amqp/utils.py | 102 + .../anyjson-0.3.3-py3.6.egg-info/PKG-INFO | 85 + .../anyjson-0.3.3-py3.6.egg-info/SOURCES.txt | 15 + .../dependency_links.txt | 1 + .../installed-files.txt | 7 + .../anyjson-0.3.3-py3.6.egg-info/not-zip-safe | 1 + .../top_level.txt | 1 + .../site-packages/anyjson/__init__.py | 142 + .../billiard-3.3.0.23-py3.6.egg-info/PKG-INFO | 792 +++++ .../SOURCES.txt | 71 + .../dependency_links.txt | 1 + .../installed-files.txt | 67 + .../not-zip-safe | 1 + .../top_level.txt | 2 + .../site-packages/billiard/__init__.py | 323 +++ .../python3.6/site-packages/billiard/_ext.py | 40 + .../python3.6/site-packages/billiard/_win.py | 116 + .../site-packages/billiard/common.py | 134 + .../site-packages/billiard/compat.py | 107 + .../site-packages/billiard/connection.py | 27 + .../site-packages/billiard/dummy/__init__.py | 165 ++ .../billiard/dummy/connection.py | 93 + .../python3.6/site-packages/billiard/einfo.py | 134 + .../site-packages/billiard/exceptions.py | 54 + .../python3.6/site-packages/billiard/five.py | 192 ++ .../site-packages/billiard/forking.py | 580 ++++ .../python3.6/site-packages/billiard/heap.py | 255 ++ .../site-packages/billiard/managers.py | 1169 ++++++++ .../python3.6/site-packages/billiard/pool.py | 1959 +++++++++++++ .../site-packages/billiard/process.py | 368 +++ .../site-packages/billiard/py3/__init__.py | 0 .../site-packages/billiard/py3/connection.py | 965 +++++++ .../site-packages/billiard/py3/reduction.py | 249 ++ .../site-packages/billiard/queues.py | 372 +++ .../site-packages/billiard/reduction.py | 10 + .../site-packages/billiard/sharedctypes.py | 248 ++ .../site-packages/billiard/synchronize.py | 449 +++ .../site-packages/billiard/tests/__init__.py | 21 + .../site-packages/billiard/tests/compat.py | 85 + 
.../billiard/tests/test_common.py | 108 + .../billiard/tests/test_package.py | 12 + .../site-packages/billiard/tests/utils.py | 145 + .../python3.6/site-packages/billiard/util.py | 152 + .../DESCRIPTION.rst | 428 +++ .../INSTALLER} | 0 .../celery-3.1.26.post2.dist-info/METADATA | 500 ++++ .../celery-3.1.26.post2.dist-info/RECORD | 496 ++++ .../celery-3.1.26.post2.dist-info/WHEEL | 6 + .../entry_points.txt | 6 + .../metadata.json | 1 + .../top_level.txt | 1 + .../site-packages/celery/__init__.py | 155 + .../site-packages/celery/__main__.py | 54 + .../python3.6/site-packages/celery/_state.py | 159 + .../site-packages/celery/app/__init__.py | 150 + .../site-packages/celery/app/amqp.py | 512 ++++ .../site-packages/celery/app/annotations.py | 58 + .../site-packages/celery/app/base.py | 675 +++++ .../site-packages/celery/app/builtins.py | 379 +++ .../site-packages/celery/app/control.py | 317 ++ .../site-packages/celery/app/defaults.py | 274 ++ .../python3.6/site-packages/celery/app/log.py | 257 ++ .../site-packages/celery/app/registry.py | 71 + .../site-packages/celery/app/routes.py | 95 + .../site-packages/celery/app/task.py | 948 ++++++ .../site-packages/celery/app/trace.py | 441 +++ .../site-packages/celery/app/utils.py | 266 ++ .../site-packages/celery/apps/__init__.py | 0 .../site-packages/celery/apps/beat.py | 151 + .../site-packages/celery/apps/worker.py | 372 +++ .../site-packages/celery/backends/__init__.py | 68 + .../site-packages/celery/backends/amqp.py | 317 ++ .../site-packages/celery/backends/base.py | 623 ++++ .../site-packages/celery/backends/cache.py | 161 ++ .../celery/backends/cassandra.py | 196 ++ .../celery/backends/couchbase.py | 116 + .../celery/backends/database/__init__.py | 201 ++ .../celery/backends/database/models.py | 74 + .../celery/backends/database/session.py | 62 + .../site-packages/celery/backends/mongodb.py | 264 ++ .../site-packages/celery/backends/redis.py | 295 ++ .../site-packages/celery/backends/rpc.py | 67 + .../python3.6/site-packages/celery/beat.py | 571 ++++ .../site-packages/celery/bin/__init__.py | 5 + .../site-packages/celery/bin/amqp.py | 380 +++ .../site-packages/celery/bin/base.py | 668 +++++ .../site-packages/celery/bin/beat.py | 100 + .../site-packages/celery/bin/celery.py | 850 ++++++ .../celery/bin/celeryd_detach.py | 181 ++ .../site-packages/celery/bin/events.py | 139 + .../site-packages/celery/bin/graph.py | 191 ++ .../site-packages/celery/bin/multi.py | 646 +++++ .../site-packages/celery/bin/worker.py | 270 ++ .../site-packages/celery/bootsteps.py | 422 +++ .../python3.6/site-packages/celery/canvas.py | 698 +++++ .../celery/concurrency/__init__.py | 29 + .../celery/concurrency/asynpool.py | 1270 ++++++++ .../site-packages/celery/concurrency/base.py | 171 ++ .../celery/concurrency/eventlet.py | 161 ++ .../celery/concurrency/gevent.py | 136 + .../celery/concurrency/prefork.py | 178 ++ .../site-packages/celery/concurrency/solo.py | 30 + .../celery/concurrency/threads.py | 57 + .../site-packages/celery/contrib/__init__.py | 0 .../site-packages/celery/contrib/abortable.py | 172 ++ .../site-packages/celery/contrib/batches.py | 249 ++ .../site-packages/celery/contrib/methods.py | 126 + .../site-packages/celery/contrib/migrate.py | 365 +++ .../site-packages/celery/contrib/rdb.py | 183 ++ .../site-packages/celery/contrib/sphinx.py | 76 + .../site-packages/celery/datastructures.py | 671 +++++ .../site-packages/celery/events/__init__.py | 408 +++ .../site-packages/celery/events/cursesmon.py | 544 ++++ .../site-packages/celery/events/dumper.py | 109 + 
.../site-packages/celery/events/snapshot.py | 114 + .../site-packages/celery/events/state.py | 656 +++++ .../site-packages/celery/exceptions.py | 171 ++ .../python3.6/site-packages/celery/five.py | 392 +++ .../site-packages/celery/fixups/__init__.py | 0 .../site-packages/celery/fixups/django.py | 266 ++ .../site-packages/celery/loaders/__init__.py | 37 + .../site-packages/celery/loaders/app.py | 17 + .../site-packages/celery/loaders/base.py | 299 ++ .../site-packages/celery/loaders/default.py | 52 + .../python3.6/site-packages/celery/local.py | 373 +++ .../site-packages/celery/platforms.py | 813 ++++++ .../python3.6/site-packages/celery/result.py | 925 ++++++ .../site-packages/celery/schedules.py | 593 ++++ .../site-packages/celery/security/__init__.py | 71 + .../celery/security/certificate.py | 93 + .../site-packages/celery/security/key.py | 27 + .../celery/security/serialization.py | 110 + .../site-packages/celery/security/utils.py | 35 + .../python3.6/site-packages/celery/signals.py | 76 + .../python3.6/site-packages/celery/states.py | 153 + .../site-packages/celery/task/__init__.py | 59 + .../site-packages/celery/task/base.py | 179 ++ .../site-packages/celery/task/http.py | 220 ++ .../site-packages/celery/task/sets.py | 88 + .../site-packages/celery/task/trace.py | 12 + .../site-packages/celery/tests/__init__.py | 87 + .../celery/tests/app/__init__.py | 0 .../celery/tests/app/test_amqp.py | 228 ++ .../celery/tests/app/test_annotations.py | 56 + .../celery/tests/app/test_app.py | 726 +++++ .../celery/tests/app/test_beat.py | 539 ++++ .../celery/tests/app/test_builtins.py | 217 ++ .../celery/tests/app/test_celery.py | 18 + .../celery/tests/app/test_control.py | 251 ++ .../celery/tests/app/test_defaults.py | 60 + .../celery/tests/app/test_exceptions.py | 35 + .../celery/tests/app/test_loaders.py | 275 ++ .../celery/tests/app/test_log.py | 385 +++ .../celery/tests/app/test_registry.py | 78 + .../celery/tests/app/test_routes.py | 158 + .../celery/tests/app/test_schedules.py | 717 +++++ .../celery/tests/app/test_utils.py | 46 + .../celery/tests/backends/__init__.py | 0 .../celery/tests/backends/test_amqp.py | 406 +++ .../celery/tests/backends/test_backends.py | 41 + .../celery/tests/backends/test_base.py | 466 +++ .../celery/tests/backends/test_cache.py | 280 ++ .../celery/tests/backends/test_cassandra.py | 190 ++ .../celery/tests/backends/test_couchbase.py | 136 + .../celery/tests/backends/test_database.py | 196 ++ .../celery/tests/backends/test_mongodb.py | 366 +++ .../celery/tests/backends/test_redis.py | 282 ++ .../celery/tests/backends/test_rpc.py | 75 + .../celery/tests/bin/__init__.py | 0 .../celery/tests/bin/proj/__init__.py | 5 + .../celery/tests/bin/proj/app.py | 5 + .../celery/tests/bin/test_amqp.py | 153 + .../celery/tests/bin/test_base.py | 332 +++ .../celery/tests/bin/test_beat.py | 196 ++ .../celery/tests/bin/test_celery.py | 588 ++++ .../celery/tests/bin/test_celeryd_detach.py | 106 + .../celery/tests/bin/test_celeryevdump.py | 68 + .../celery/tests/bin/test_events.py | 73 + .../celery/tests/bin/test_multi.py | 474 +++ .../celery/tests/bin/test_worker.py | 681 +++++ .../site-packages/celery/tests/case.py | 880 ++++++ .../celery/tests/compat_modules/__init__.py | 0 .../tests/compat_modules/test_compat.py | 82 + .../tests/compat_modules/test_compat_utils.py | 50 + .../tests/compat_modules/test_decorators.py | 39 + .../celery/tests/compat_modules/test_http.py | 158 + .../tests/compat_modules/test_messaging.py | 13 + .../celery/tests/compat_modules/test_sets.py | 244 ++ 
.../celery/tests/concurrency/__init__.py | 0 .../tests/concurrency/test_concurrency.py | 111 + .../celery/tests/concurrency/test_eventlet.py | 118 + .../celery/tests/concurrency/test_gevent.py | 146 + .../celery/tests/concurrency/test_pool.py | 82 + .../celery/tests/concurrency/test_prefork.py | 320 +++ .../celery/tests/concurrency/test_solo.py | 24 + .../celery/tests/concurrency/test_threads.py | 60 + .../celery/tests/contrib/__init__.py | 0 .../celery/tests/contrib/test_abortable.py | 49 + .../celery/tests/contrib/test_methods.py | 34 + .../celery/tests/contrib/test_migrate.py | 314 ++ .../celery/tests/contrib/test_rdb.py | 105 + .../celery/tests/events/__init__.py | 0 .../celery/tests/events/test_cursesmon.py | 70 + .../celery/tests/events/test_events.py | 260 ++ .../celery/tests/events/test_snapshot.py | 130 + .../celery/tests/events/test_state.py | 582 ++++ .../celery/tests/fixups/__init__.py | 0 .../celery/tests/fixups/test_django.py | 301 ++ .../celery/tests/functional/__init__.py | 0 .../celery/tests/functional/case.py | 178 ++ .../celery/tests/functional/tasks.py | 24 + .../celery/tests/security/__init__.py | 68 + .../celery/tests/security/case.py | 16 + .../celery/tests/security/test_certificate.py | 78 + .../celery/tests/security/test_key.py | 26 + .../celery/tests/security/test_security.py | 110 + .../tests/security/test_serialization.py | 64 + .../celery/tests/slow/__init__.py | 0 .../celery/tests/tasks/__init__.py | 0 .../celery/tests/tasks/test_canvas.py | 346 +++ .../celery/tests/tasks/test_chord.py | 235 ++ .../celery/tests/tasks/test_context.py | 67 + .../celery/tests/tasks/test_result.py | 731 +++++ .../celery/tests/tasks/test_states.py | 31 + .../celery/tests/tasks/test_tasks.py | 464 +++ .../celery/tests/tasks/test_trace.py | 222 ++ .../celery/tests/utils/__init__.py | 0 .../celery/tests/utils/test_datastructures.py | 360 +++ .../celery/tests/utils/test_dispatcher.py | 138 + .../celery/tests/utils/test_encoding.py | 20 + .../celery/tests/utils/test_functional.py | 185 ++ .../celery/tests/utils/test_imports.py | 44 + .../celery/tests/utils/test_local.py | 364 +++ .../celery/tests/utils/test_mail.py | 53 + .../celery/tests/utils/test_pickle.py | 51 + .../celery/tests/utils/test_platforms.py | 713 +++++ .../celery/tests/utils/test_saferef.py | 94 + .../celery/tests/utils/test_serialization.py | 42 + .../celery/tests/utils/test_sysinfo.py | 33 + .../celery/tests/utils/test_term.py | 89 + .../celery/tests/utils/test_text.py | 88 + .../celery/tests/utils/test_threads.py | 109 + .../celery/tests/utils/test_timer2.py | 187 ++ .../celery/tests/utils/test_timeutils.py | 267 ++ .../celery/tests/utils/test_utils.py | 108 + .../celery/tests/worker/__init__.py | 0 .../celery/tests/worker/test_autoreload.py | 328 +++ .../celery/tests/worker/test_autoscale.py | 198 ++ .../celery/tests/worker/test_bootsteps.py | 338 +++ .../celery/tests/worker/test_components.py | 38 + .../celery/tests/worker/test_consumer.py | 512 ++++ .../celery/tests/worker/test_control.py | 601 ++++ .../celery/tests/worker/test_heartbeat.py | 73 + .../celery/tests/worker/test_hub.py | 342 +++ .../celery/tests/worker/test_loops.py | 425 +++ .../celery/tests/worker/test_request.py | 969 +++++++ .../celery/tests/worker/test_revoke.py | 13 + .../celery/tests/worker/test_state.py | 161 ++ .../celery/tests/worker/test_strategy.py | 139 + .../celery/tests/worker/test_worker.py | 1128 ++++++++ .../site-packages/celery/utils/__init__.py | 407 +++ .../site-packages/celery/utils/compat.py | 1 + 
.../site-packages/celery/utils/debug.py | 167 ++ .../celery/utils/dispatch/__init__.py | 6 + .../celery/utils/dispatch/saferef.py | 286 ++ .../celery/utils/dispatch/signal.py | 241 ++ .../site-packages/celery/utils/encoding.py | 14 + .../site-packages/celery/utils/functional.py | 323 +++ .../site-packages/celery/utils/imports.py | 114 + .../site-packages/celery/utils/iso8601.py | 77 + .../site-packages/celery/utils/log.py | 301 ++ .../site-packages/celery/utils/mail.py | 190 ++ .../site-packages/celery/utils/objects.py | 91 + .../celery/utils/serialization.py | 167 ++ .../site-packages/celery/utils/sysinfo.py | 45 + .../site-packages/celery/utils/term.py | 162 ++ .../site-packages/celery/utils/text.py | 86 + .../site-packages/celery/utils/threads.py | 329 +++ .../site-packages/celery/utils/timer2.py | 144 + .../site-packages/celery/utils/timeutils.py | 370 +++ .../site-packages/celery/worker/__init__.py | 393 +++ .../site-packages/celery/worker/autoreload.py | 302 ++ .../site-packages/celery/worker/autoscale.py | 162 ++ .../site-packages/celery/worker/components.py | 247 ++ .../site-packages/celery/worker/consumer.py | 887 ++++++ .../site-packages/celery/worker/control.py | 385 +++ .../site-packages/celery/worker/heartbeat.py | 58 + .../site-packages/celery/worker/job.py | 595 ++++ .../site-packages/celery/worker/loops.py | 108 + .../site-packages/celery/worker/pidbox.py | 116 + .../site-packages/celery/worker/request.py | 536 ++++ .../site-packages/celery/worker/state.py | 246 ++ .../site-packages/celery/worker/strategy.py | 95 + .../croniter-0.3.25.dist-info/INSTALLER | 1 + .../croniter-0.3.25.dist-info/METADATA | 324 +++ .../croniter-0.3.25.dist-info/RECORD | 17 + .../WHEEL | 0 .../croniter-0.3.25.dist-info/top_level.txt | 1 + .../site-packages/croniter/__init__.py | 9 + .../site-packages/croniter/croniter.py | 564 ++++ .../site-packages/croniter/tests/__init__.py | 0 .../site-packages/croniter/tests/base.py | 13 + .../croniter/tests/test_croniter.py | 837 ++++++ .../croniter/tests/test_speed.py | 225 ++ .../site-packages/dateutil/__init__.py | 8 + .../site-packages/dateutil/_common.py | 43 + .../site-packages/dateutil/_version.py | 4 + .../site-packages/dateutil/easter.py | 89 + .../site-packages/dateutil/parser/__init__.py | 60 + .../site-packages/dateutil/parser/_parser.py | 1578 ++++++++++ .../dateutil/parser/isoparser.py | 406 +++ .../site-packages/dateutil/relativedelta.py | 590 ++++ .../python3.6/site-packages/dateutil/rrule.py | 1672 +++++++++++ .../site-packages/dateutil/tz/__init__.py | 17 + .../site-packages/dateutil/tz/_common.py | 415 +++ .../site-packages/dateutil/tz/_factories.py | 49 + .../python3.6/site-packages/dateutil/tz/tz.py | 1785 ++++++++++++ .../site-packages/dateutil/tz/win.py | 331 +++ .../python3.6/site-packages/dateutil/tzwin.py | 2 + .../python3.6/site-packages/dateutil/utils.py | 71 + .../dateutil/zoneinfo/__init__.py | 167 ++ .../zoneinfo/dateutil-zoneinfo.tar.gz | Bin 0 -> 139130 bytes .../dateutil/zoneinfo/rebuild.py | 53 + .../PKG-INFO | 200 ++ .../SOURCES.txt | 137 + .../dependency_links.txt | 1 + .../installed-files.txt | 119 + .../not-zip-safe | 1 + .../requires.txt | 2 + .../top_level.txt | 1 + .../site-packages/djcelery/__init__.py | 34 + .../python3.6/site-packages/djcelery/admin.py | 385 +++ .../site-packages/djcelery/admin_utils.py | 50 + .../python3.6/site-packages/djcelery/app.py | 7 + .../djcelery/backends/__init__.py | 0 .../site-packages/djcelery/backends/cache.py | 34 + .../djcelery/backends/database.py | 65 + 
.../site-packages/djcelery/common.py | 72 + .../site-packages/djcelery/compat.py | 44 + .../djcelery/contrib/__init__.py | 0 .../djcelery/contrib/test_runner.py | 69 + .../python3.6/site-packages/djcelery/db.py | 63 + .../site-packages/djcelery/humanize.py | 85 + .../site-packages/djcelery/loaders.py | 202 ++ .../djcelery/management/__init__.py | 0 .../site-packages/djcelery/management/base.py | 142 + .../djcelery/management/commands/__init__.py | 0 .../djcelery/management/commands/celery.py | 22 + .../management/commands/celerybeat.py | 24 + .../djcelery/management/commands/celerycam.py | 26 + .../djcelery/management/commands/celeryd.py | 25 + .../management/commands/celeryd_detach.py | 25 + .../management/commands/celeryd_multi.py | 25 + .../djcelery/management/commands/celerymon.py | 42 + .../management/commands/djcelerymon.py | 48 + .../site-packages/djcelery/managers.py | 243 ++ .../djcelery/migrations/0001_initial.py | 163 ++ .../djcelery/migrations/__init__.py | 0 .../site-packages/djcelery/models.py | 381 +++ .../python3.6/site-packages/djcelery/mon.py | 77 + .../djcelery/monproj/__init__.py | 0 .../site-packages/djcelery/monproj/urls.py | 16 + .../site-packages/djcelery/picklefield.py | 128 + .../site-packages/djcelery/schedulers.py | 282 ++ .../site-packages/djcelery/snapshot.py | 143 + .../djcelery/static/djcelery/style.css | 4 + .../templates/admin/djcelery/change_list.html | 20 + .../djcelery/confirm_rate_limit.html | 25 + .../site-packages/djcelery/tests/__init__.py | 0 .../site-packages/djcelery/tests/_compat.py | 6 + .../site-packages/djcelery/tests/req.py | 76 + .../djcelery/tests/test_admin.py | 86 + .../djcelery/tests/test_backends/__init__.py | 0 .../tests/test_backends/test_cache.py | 115 + .../tests/test_backends/test_database.py | 105 + .../djcelery/tests/test_commands.py | 32 + .../djcelery/tests/test_discovery.py | 35 + .../djcelery/tests/test_loaders.py | 45 + .../djcelery/tests/test_models.py | 102 + .../djcelery/tests/test_schedulers.py | 336 +++ .../djcelery/tests/test_snapshot.py | 250 ++ .../djcelery/tests/test_views.py | 211 ++ .../djcelery/tests/test_worker_job.py | 82 + .../site-packages/djcelery/tests/utils.py | 7 + .../djcelery/transport/__init__.py | 10 + .../python3.6/site-packages/djcelery/urls.py | 40 + .../python3.6/site-packages/djcelery/utils.py | 92 + .../python3.6/site-packages/djcelery/views.py | 125 + .../site-packages/funtests/__init__.py | 5 + .../python3.6/site-packages/funtests/setup.py | 58 + .../kombu-3.0.37.dist-info/DESCRIPTION.rst | 332 +++ .../kombu-3.0.37.dist-info/INSTALLER | 1 + .../kombu-3.0.37.dist-info/METADATA | 364 +++ .../kombu-3.0.37.dist-info/RECORD | 225 ++ .../kombu-3.0.37.dist-info/WHEEL | 6 + .../kombu-3.0.37.dist-info/metadata.json | 1 + .../kombu-3.0.37.dist-info/top_level.txt | 1 + .../python3.6/site-packages/kombu/__init__.py | 108 + .../python3.6/site-packages/kombu/abstract.py | 119 + .../site-packages/kombu/async/__init__.py | 15 + .../site-packages/kombu/async/debug.py | 60 + .../site-packages/kombu/async/hub.py | 366 +++ .../site-packages/kombu/async/semaphore.py | 110 + .../site-packages/kombu/async/timer.py | 232 ++ .../python3.6/site-packages/kombu/clocks.py | 154 + .../python3.6/site-packages/kombu/common.py | 407 +++ .../python3.6/site-packages/kombu/compat.py | 215 ++ .../site-packages/kombu/compression.py | 83 + .../site-packages/kombu/connection.py | 1074 +++++++ .../python3.6/site-packages/kombu/entity.py | 748 +++++ .../site-packages/kombu/exceptions.py | 83 + 
.../lib/python3.6/site-packages/kombu/five.py | 206 ++ .../lib/python3.6/site-packages/kombu/log.py | 147 + .../python3.6/site-packages/kombu/message.py | 154 + .../site-packages/kombu/messaging.py | 608 ++++ .../python3.6/site-packages/kombu/mixins.py | 257 ++ .../python3.6/site-packages/kombu/pidbox.py | 364 +++ .../python3.6/site-packages/kombu/pools.py | 153 + .../site-packages/kombu/serialization.py | 461 +++ .../python3.6/site-packages/kombu/simple.py | 137 + .../lib/python3.6/site-packages/kombu/syn.py | 53 + .../site-packages/kombu/tests/__init__.py | 91 + .../kombu/tests/async/__init__.py | 0 .../kombu/tests/async/test_hub.py | 33 + .../kombu/tests/async/test_semaphore.py | 45 + .../site-packages/kombu/tests/case.py | 219 ++ .../site-packages/kombu/tests/mocks.py | 148 + .../site-packages/kombu/tests/test_clocks.py | 104 + .../site-packages/kombu/tests/test_common.py | 419 +++ .../site-packages/kombu/tests/test_compat.py | 331 +++ .../kombu/tests/test_compression.py | 50 + .../kombu/tests/test_connection.py | 693 +++++ .../kombu/tests/test_entities.py | 374 +++ .../site-packages/kombu/tests/test_log.py | 165 ++ .../kombu/tests/test_messaging.py | 621 ++++ .../site-packages/kombu/tests/test_mixins.py | 239 ++ .../site-packages/kombu/tests/test_pidbox.py | 287 ++ .../site-packages/kombu/tests/test_pools.py | 239 ++ .../kombu/tests/test_serialization.py | 347 +++ .../site-packages/kombu/tests/test_simple.py | 136 + .../site-packages/kombu/tests/test_syn.py | 61 + .../kombu/tests/transport/__init__.py | 0 .../kombu/tests/transport/test_SQS.py | 302 ++ .../kombu/tests/transport/test_amqplib.py | 162 ++ .../kombu/tests/transport/test_base.py | 148 + .../kombu/tests/transport/test_filesystem.py | 123 + .../kombu/tests/transport/test_librabbitmq.py | 150 + .../kombu/tests/transport/test_memory.py | 157 + .../kombu/tests/transport/test_mongodb.py | 120 + .../kombu/tests/transport/test_pyamqp.py | 179 ++ .../kombu/tests/transport/test_qpid.py | 1928 +++++++++++++ .../kombu/tests/transport/test_redis.py | 1269 ++++++++ .../kombu/tests/transport/test_sqlalchemy.py | 69 + .../kombu/tests/transport/test_transport.py | 44 + .../kombu/tests/transport/virtual/__init__.py | 0 .../tests/transport/virtual/test_base.py | 540 ++++ .../tests/transport/virtual/test_exchange.py | 161 ++ .../transport/virtual/test_scheduling.py | 67 + .../kombu/tests/utils/__init__.py | 0 .../kombu/tests/utils/test_amq_manager.py | 36 + .../kombu/tests/utils/test_debug.py | 56 + .../kombu/tests/utils/test_encoding.py | 102 + .../kombu/tests/utils/test_functional.py | 63 + .../kombu/tests/utils/test_utils.py | 412 +++ .../site-packages/kombu/transport/SLMQ.py | 186 ++ .../site-packages/kombu/transport/SQS.py | 539 ++++ .../site-packages/kombu/transport/__init__.py | 110 + .../site-packages/kombu/transport/amqplib.py | 401 +++ .../site-packages/kombu/transport/base.py | 175 ++ .../kombu/transport/beanstalk.py | 155 + .../site-packages/kombu/transport/couchdb.py | 142 + .../kombu/transport/django/__init__.py | 83 + .../transport/django/management/__init__.py | 0 .../django/management/commands/__init__.py | 0 .../commands/clean_kombu_messages.py | 22 + .../kombu/transport/django/managers.py | 95 + .../django/migrations/0001_initial.py | 50 + .../migrations/0002_auto_20181021_1329.py | 19 + .../transport/django/migrations/__init__.py | 16 + .../kombu/transport/django/models.py | 38 + .../django/south_migrations/0001_initial.py | 57 + .../django/south_migrations/__init__.py | 0 .../kombu/transport/filesystem.py | 193 ++ 
.../kombu/transport/librabbitmq.py | 176 ++ .../site-packages/kombu/transport/memory.py | 77 + .../site-packages/kombu/transport/mongodb.py | 338 +++ .../site-packages/kombu/transport/pyamqp.py | 155 + .../site-packages/kombu/transport/pyro.py | 99 + .../site-packages/kombu/transport/qpid.py | 1740 +++++++++++ .../site-packages/kombu/transport/redis.py | 1023 +++++++ .../kombu/transport/sqlalchemy/__init__.py | 160 ++ .../kombu/transport/sqlalchemy/models.py | 62 + .../kombu/transport/virtual/__init__.py | 854 ++++++ .../kombu/transport/virtual/exchange.py | 134 + .../kombu/transport/virtual/scheduling.py | 49 + .../site-packages/kombu/transport/zmq.py | 314 ++ .../kombu/transport/zookeeper.py | 188 ++ .../site-packages/kombu/utils/__init__.py | 453 +++ .../site-packages/kombu/utils/amq_manager.py | 18 + .../site-packages/kombu/utils/compat.py | 60 + .../site-packages/kombu/utils/debug.py | 65 + .../site-packages/kombu/utils/encoding.py | 129 + .../site-packages/kombu/utils/eventio.py | 264 ++ .../site-packages/kombu/utils/functional.py | 82 + .../site-packages/kombu/utils/limits.py | 69 + .../site-packages/kombu/utils/text.py | 47 + .../site-packages/kombu/utils/url.py | 64 + .../pip-18.1.dist-info/INSTALLER | 1 + .../LICENSE.txt | 0 .../METADATA | 3 +- .../RECORD | 176 +- .../site-packages/pip-18.1.dist-info/WHEEL | 6 + .../entry_points.txt | 0 .../pip-18.1.dist-info/top_level.txt | 1 + .../python3.6/site-packages/pip/__init__.py | 2 +- .../site-packages/pip/_internal/__init__.py | 244 +- .../site-packages/pip/_internal/build_env.py | 16 + .../site-packages/pip/_internal/cache.py | 12 +- .../pip/_internal/cli/__init__.py | 4 + .../pip/_internal/cli/autocompletion.py | 152 + .../{basecommand.py => cli/base_command.py} | 32 +- .../pip/_internal/{ => cli}/cmdoptions.py | 117 +- .../pip/_internal/cli/main_parser.py | 96 + .../{baseparser.py => cli/parser.py} | 29 +- .../pip/_internal/{ => cli}/status_codes.py | 0 .../pip/_internal/commands/__init__.py | 2 +- .../pip/_internal/commands/check.py | 2 +- .../pip/_internal/commands/completion.py | 2 +- .../pip/_internal/commands/configuration.py | 4 +- .../pip/_internal/commands/download.py | 76 +- .../pip/_internal/commands/freeze.py | 8 +- .../pip/_internal/commands/hash.py | 4 +- .../pip/_internal/commands/help.py | 3 +- .../pip/_internal/commands/install.py | 29 +- .../pip/_internal/commands/list.py | 8 +- .../pip/_internal/commands/search.py | 6 +- .../pip/_internal/commands/show.py | 4 +- .../pip/_internal/commands/uninstall.py | 7 +- .../pip/_internal/commands/wheel.py | 4 +- .../pip/_internal/configuration.py | 19 +- .../site-packages/pip/_internal/exceptions.py | 19 + .../site-packages/pip/_internal/index.py | 564 ++-- .../site-packages/pip/_internal/locations.py | 2 +- .../pip/_internal/models/candidate.py | 23 + .../pip/_internal/models/format_control.py | 62 + .../pip/_internal/models/index.py | 26 +- .../pip/_internal/models/link.py | 141 + .../pip/_internal/operations/freeze.py | 33 +- .../pip/_internal/operations/prepare.py | 48 +- .../site-packages/pip/_internal/pep425tags.py | 8 +- .../site-packages/pip/_internal/pyproject.py | 144 + .../pip/_internal/req/constructors.py | 298 ++ .../pip/_internal/req/req_file.py | 10 +- .../pip/_internal/req/req_install.py | 404 +-- .../pip/_internal/req/req_set.py | 130 +- .../pip/_internal/req/req_uninstall.py | 9 +- .../site-packages/pip/_internal/resolve.py | 4 +- .../pip/_internal/utils/appdirs.py | 2 +- .../pip/_internal/{ => utils}/compat.py | 13 + .../pip/_internal/utils/filesystem.py | 
2 +- .../pip/_internal/utils/logging.py | 2 +- .../site-packages/pip/_internal/utils/misc.py | 55 +- .../pip/_internal/utils/models.py | 40 + .../pip/_internal/utils/outdated.py | 25 +- .../pip/_internal/utils/packaging.py | 17 +- .../site-packages/pip/_internal/utils/ui.py | 2 +- .../pip/_internal/vcs/__init__.py | 78 +- .../site-packages/pip/_internal/vcs/bazaar.py | 16 +- .../site-packages/pip/_internal/vcs/git.py | 117 +- .../pip/_internal/vcs/mercurial.py | 11 +- .../pip/_internal/vcs/subversion.py | 103 +- .../site-packages/pip/_internal/wheel.py | 18 +- .../pip/_vendor/certifi/__init__.py | 2 +- .../pip/_vendor/certifi/__main__.py | 2 +- .../pip/_vendor/certifi/cacert.pem | 226 +- .../pip/_vendor/packaging/__about__.py | 4 +- .../pip/_vendor/packaging/requirements.py | 8 +- .../pip/_vendor/packaging/specifiers.py | 2 +- .../pip/_vendor/pep517/__init__.py | 4 + .../pip/_vendor/pep517/_in_process.py | 182 ++ .../site-packages/pip/_vendor/pep517/check.py | 194 ++ .../pip/_vendor/pep517/colorlog.py | 110 + .../pip/_vendor/pep517/compat.py | 23 + .../pip/_vendor/pep517/envbuild.py | 150 + .../pip/_vendor/pep517/wrappers.py | 134 + .../pip/_vendor/pkg_resources/__init__.py | 67 +- .../pip/_vendor/pkg_resources/py31compat.py | 5 +- .../site-packages/pip/_vendor/pyparsing.py | 46 +- .../pip/_vendor/pytoml/parser.py | 4 +- .../pip/_vendor/requests/__init__.py | 2 +- .../python_dateutil-2.7.3.dist-info/INSTALLER | 1 + .../LICENSE.txt | 54 + .../python_dateutil-2.7.3.dist-info/METADATA | 190 ++ .../python_dateutil-2.7.3.dist-info/RECORD | 44 + .../python_dateutil-2.7.3.dist-info/WHEEL | 6 + .../top_level.txt | 1 + .../python_dateutil-2.7.3.dist-info/zip-safe | 1 + 635 files changed, 117534 insertions(+), 1732 deletions(-) create mode 100644 application/celeryapp.py create mode 100644 application/email_service.py create mode 100644 application/migrations/0006_auto_20181021_1347.py create mode 100644 application/tasks.py create mode 160000 croniter create mode 100755 thesisenv/bin/celery create mode 100755 thesisenv/bin/celerybeat create mode 100755 thesisenv/bin/celeryd create mode 100755 thesisenv/bin/celeryd-multi create mode 100644 thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/DESCRIPTION.rst rename thesisenv/lib/python3.6/site-packages/{pip-18.0.dist-info => amqp-1.4.9.dist-info}/INSTALLER (100%) create mode 100644 thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/METADATA create mode 100644 thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/RECORD create mode 100644 thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/WHEEL create mode 100644 thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/metadata.json create mode 100644 thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/top_level.txt create mode 100644 thesisenv/lib/python3.6/site-packages/amqp/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/amqp/abstract_channel.py create mode 100644 thesisenv/lib/python3.6/site-packages/amqp/basic_message.py create mode 100644 thesisenv/lib/python3.6/site-packages/amqp/channel.py create mode 100644 thesisenv/lib/python3.6/site-packages/amqp/connection.py create mode 100644 thesisenv/lib/python3.6/site-packages/amqp/exceptions.py create mode 100644 thesisenv/lib/python3.6/site-packages/amqp/five.py create mode 100644 thesisenv/lib/python3.6/site-packages/amqp/method_framing.py create mode 100644 thesisenv/lib/python3.6/site-packages/amqp/protocol.py create mode 100644 thesisenv/lib/python3.6/site-packages/amqp/serialization.py 
create mode 100644 thesisenv/lib/python3.6/site-packages/amqp/tests/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/amqp/tests/case.py create mode 100644 thesisenv/lib/python3.6/site-packages/amqp/tests/test_channel.py create mode 100644 thesisenv/lib/python3.6/site-packages/amqp/transport.py create mode 100644 thesisenv/lib/python3.6/site-packages/amqp/utils.py create mode 100644 thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/PKG-INFO create mode 100644 thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/SOURCES.txt create mode 100644 thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/dependency_links.txt create mode 100644 thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/installed-files.txt create mode 100644 thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/not-zip-safe create mode 100644 thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/top_level.txt create mode 100644 thesisenv/lib/python3.6/site-packages/anyjson/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/PKG-INFO create mode 100644 thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/SOURCES.txt create mode 100644 thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/dependency_links.txt create mode 100644 thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/installed-files.txt create mode 100644 thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/not-zip-safe create mode 100644 thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/top_level.txt create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/_ext.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/_win.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/common.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/compat.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/connection.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/dummy/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/dummy/connection.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/einfo.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/exceptions.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/five.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/forking.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/heap.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/managers.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/pool.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/process.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/py3/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/py3/connection.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/py3/reduction.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/queues.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/reduction.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/sharedctypes.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/synchronize.py create mode 100644 
thesisenv/lib/python3.6/site-packages/billiard/tests/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/tests/compat.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/tests/test_common.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/tests/test_package.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/tests/utils.py create mode 100644 thesisenv/lib/python3.6/site-packages/billiard/util.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/DESCRIPTION.rst rename thesisenv/lib/python3.6/site-packages/{pip-18.0.dist-info/top_level.txt => celery-3.1.26.post2.dist-info/INSTALLER} (100%) create mode 100644 thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/METADATA create mode 100644 thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/RECORD create mode 100644 thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/WHEEL create mode 100644 thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/entry_points.txt create mode 100644 thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/metadata.json create mode 100644 thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/top_level.txt create mode 100644 thesisenv/lib/python3.6/site-packages/celery/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/__main__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/_state.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/app/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/app/amqp.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/app/annotations.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/app/base.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/app/builtins.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/app/control.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/app/defaults.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/app/log.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/app/registry.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/app/routes.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/app/task.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/app/trace.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/app/utils.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/apps/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/apps/beat.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/apps/worker.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/backends/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/backends/amqp.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/backends/base.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/backends/cache.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/backends/cassandra.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/backends/couchbase.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/backends/database/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/backends/database/models.py create mode 100644 
thesisenv/lib/python3.6/site-packages/celery/backends/database/session.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/backends/mongodb.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/backends/redis.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/backends/rpc.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/beat.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/bin/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/bin/amqp.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/bin/base.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/bin/beat.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/bin/celery.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/bin/celeryd_detach.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/bin/events.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/bin/graph.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/bin/multi.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/bin/worker.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/bootsteps.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/canvas.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/concurrency/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/concurrency/asynpool.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/concurrency/base.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/concurrency/eventlet.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/concurrency/gevent.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/concurrency/prefork.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/concurrency/solo.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/concurrency/threads.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/contrib/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/contrib/abortable.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/contrib/batches.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/contrib/methods.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/contrib/migrate.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/contrib/rdb.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/contrib/sphinx.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/datastructures.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/events/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/events/cursesmon.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/events/dumper.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/events/snapshot.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/events/state.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/exceptions.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/five.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/fixups/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/fixups/django.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/loaders/__init__.py create mode 100644 
thesisenv/lib/python3.6/site-packages/celery/loaders/app.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/loaders/base.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/loaders/default.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/local.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/platforms.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/result.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/schedules.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/security/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/security/certificate.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/security/key.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/security/serialization.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/security/utils.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/signals.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/states.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/task/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/task/base.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/task/http.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/task/sets.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/task/trace.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/app/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/app/test_amqp.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/app/test_annotations.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/app/test_app.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/app/test_beat.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/app/test_builtins.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/app/test_celery.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/app/test_control.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/app/test_defaults.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/app/test_exceptions.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/app/test_loaders.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/app/test_log.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/app/test_registry.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/app/test_routes.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/app/test_schedules.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/app/test_utils.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/backends/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_amqp.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_backends.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_base.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_cache.py create mode 100644 
thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_cassandra.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_couchbase.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_database.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_mongodb.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_redis.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_rpc.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/bin/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/bin/proj/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/bin/proj/app.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_amqp.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_base.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_beat.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celery.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celeryd_detach.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celeryevdump.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_events.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_multi.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_worker.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/case.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_compat.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_compat_utils.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_decorators.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_http.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_messaging.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_sets.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/concurrency/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/concurrency/test_concurrency.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/concurrency/test_eventlet.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/concurrency/test_gevent.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/concurrency/test_pool.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/concurrency/test_prefork.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/concurrency/test_solo.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/concurrency/test_threads.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/contrib/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/contrib/test_abortable.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/contrib/test_methods.py create mode 100644 
thesisenv/lib/python3.6/site-packages/celery/tests/contrib/test_migrate.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/contrib/test_rdb.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/events/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/events/test_cursesmon.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/events/test_events.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/events/test_snapshot.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/events/test_state.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/fixups/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/fixups/test_django.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/functional/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/functional/case.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/functional/tasks.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/security/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/security/case.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/security/test_certificate.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/security/test_key.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/security/test_security.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/security/test_serialization.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/slow/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/tasks/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_canvas.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_chord.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_context.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_result.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_states.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_tasks.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_trace.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/utils/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_datastructures.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_dispatcher.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_encoding.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_functional.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_imports.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_local.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_mail.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_pickle.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_platforms.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_saferef.py create mode 100644 
thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_serialization.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_sysinfo.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_term.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_text.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_threads.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_timer2.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_timeutils.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_utils.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/worker/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_autoreload.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_autoscale.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_bootsteps.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_components.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_consumer.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_control.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_heartbeat.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_hub.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_loops.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_request.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_revoke.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_state.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_strategy.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_worker.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/utils/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/utils/compat.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/utils/debug.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/saferef.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/signal.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/utils/encoding.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/utils/functional.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/utils/imports.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/utils/iso8601.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/utils/log.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/utils/mail.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/utils/objects.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/utils/serialization.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/utils/sysinfo.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/utils/term.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/utils/text.py create mode 100644 
thesisenv/lib/python3.6/site-packages/celery/utils/threads.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/utils/timer2.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/utils/timeutils.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/worker/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/worker/autoreload.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/worker/autoscale.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/worker/components.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/worker/consumer.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/worker/control.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/worker/heartbeat.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/worker/job.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/worker/loops.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/worker/pidbox.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/worker/request.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/worker/state.py create mode 100644 thesisenv/lib/python3.6/site-packages/celery/worker/strategy.py create mode 100644 thesisenv/lib/python3.6/site-packages/croniter-0.3.25.dist-info/INSTALLER create mode 100644 thesisenv/lib/python3.6/site-packages/croniter-0.3.25.dist-info/METADATA create mode 100644 thesisenv/lib/python3.6/site-packages/croniter-0.3.25.dist-info/RECORD rename thesisenv/lib/python3.6/site-packages/{pip-18.0.dist-info => croniter-0.3.25.dist-info}/WHEEL (100%) create mode 100644 thesisenv/lib/python3.6/site-packages/croniter-0.3.25.dist-info/top_level.txt create mode 100644 thesisenv/lib/python3.6/site-packages/croniter/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/croniter/croniter.py create mode 100644 thesisenv/lib/python3.6/site-packages/croniter/tests/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/croniter/tests/base.py create mode 100644 thesisenv/lib/python3.6/site-packages/croniter/tests/test_croniter.py create mode 100644 thesisenv/lib/python3.6/site-packages/croniter/tests/test_speed.py create mode 100644 thesisenv/lib/python3.6/site-packages/dateutil/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/dateutil/_common.py create mode 100644 thesisenv/lib/python3.6/site-packages/dateutil/_version.py create mode 100644 thesisenv/lib/python3.6/site-packages/dateutil/easter.py create mode 100644 thesisenv/lib/python3.6/site-packages/dateutil/parser/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/dateutil/parser/_parser.py create mode 100644 thesisenv/lib/python3.6/site-packages/dateutil/parser/isoparser.py create mode 100644 thesisenv/lib/python3.6/site-packages/dateutil/relativedelta.py create mode 100644 thesisenv/lib/python3.6/site-packages/dateutil/rrule.py create mode 100644 thesisenv/lib/python3.6/site-packages/dateutil/tz/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/dateutil/tz/_common.py create mode 100644 thesisenv/lib/python3.6/site-packages/dateutil/tz/_factories.py create mode 100644 thesisenv/lib/python3.6/site-packages/dateutil/tz/tz.py create mode 100644 thesisenv/lib/python3.6/site-packages/dateutil/tz/win.py create mode 100644 thesisenv/lib/python3.6/site-packages/dateutil/tzwin.py create mode 100644 thesisenv/lib/python3.6/site-packages/dateutil/utils.py create mode 
100644 thesisenv/lib/python3.6/site-packages/dateutil/zoneinfo/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz create mode 100644 thesisenv/lib/python3.6/site-packages/dateutil/zoneinfo/rebuild.py create mode 100644 thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/PKG-INFO create mode 100644 thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/SOURCES.txt create mode 100644 thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/dependency_links.txt create mode 100644 thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/installed-files.txt create mode 100644 thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/not-zip-safe create mode 100644 thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/requires.txt create mode 100644 thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/top_level.txt create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/admin.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/admin_utils.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/app.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/backends/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/backends/cache.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/backends/database.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/common.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/compat.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/contrib/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/contrib/test_runner.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/db.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/humanize.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/loaders.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/management/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/management/base.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/management/commands/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celery.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celerybeat.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celerycam.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celeryd.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celeryd_detach.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celeryd_multi.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celerymon.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/management/commands/djcelerymon.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/managers.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/migrations/0001_initial.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/migrations/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/models.py create mode 100644 
thesisenv/lib/python3.6/site-packages/djcelery/mon.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/monproj/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/monproj/urls.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/picklefield.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/schedulers.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/snapshot.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/static/djcelery/style.css create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/templates/admin/djcelery/change_list.html create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/templates/djcelery/confirm_rate_limit.html create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/tests/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/tests/_compat.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/tests/req.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/tests/test_admin.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/tests/test_backends/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/tests/test_backends/test_cache.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/tests/test_backends/test_database.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/tests/test_commands.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/tests/test_discovery.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/tests/test_loaders.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/tests/test_models.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/tests/test_schedulers.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/tests/test_snapshot.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/tests/test_views.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/tests/test_worker_job.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/tests/utils.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/transport/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/urls.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/utils.py create mode 100644 thesisenv/lib/python3.6/site-packages/djcelery/views.py create mode 100644 thesisenv/lib/python3.6/site-packages/funtests/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/funtests/setup.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/DESCRIPTION.rst create mode 100644 thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/INSTALLER create mode 100644 thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/METADATA create mode 100644 thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/RECORD create mode 100644 thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/WHEEL create mode 100644 thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/metadata.json create mode 100644 thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/top_level.txt create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/abstract.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/async/__init__.py create mode 100644 
thesisenv/lib/python3.6/site-packages/kombu/async/debug.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/async/hub.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/async/semaphore.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/async/timer.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/clocks.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/common.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/compat.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/compression.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/connection.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/entity.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/exceptions.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/five.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/log.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/message.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/messaging.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/mixins.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/pidbox.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/pools.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/serialization.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/simple.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/syn.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/async/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/async/test_hub.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/async/test_semaphore.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/case.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/mocks.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/test_clocks.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/test_common.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/test_compat.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/test_compression.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/test_connection.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/test_entities.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/test_log.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/test_messaging.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/test_mixins.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/test_pidbox.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/test_pools.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/test_serialization.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/test_simple.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/test_syn.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/transport/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_SQS.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_amqplib.py create mode 100644 
thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_base.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_filesystem.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_librabbitmq.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_memory.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_mongodb.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_pyamqp.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_qpid.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_redis.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_sqlalchemy.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_transport.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/transport/virtual/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/transport/virtual/test_base.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/transport/virtual/test_exchange.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/transport/virtual/test_scheduling.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/utils/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/utils/test_amq_manager.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/utils/test_debug.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/utils/test_encoding.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/utils/test_functional.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/tests/utils/test_utils.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/SLMQ.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/SQS.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/amqplib.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/base.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/beanstalk.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/couchdb.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/django/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/django/management/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/django/management/commands/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/django/management/commands/clean_kombu_messages.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/django/managers.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/django/migrations/0001_initial.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/django/migrations/0002_auto_20181021_1329.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/django/migrations/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/django/models.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/django/south_migrations/0001_initial.py create mode 100644 
thesisenv/lib/python3.6/site-packages/kombu/transport/django/south_migrations/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/filesystem.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/librabbitmq.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/memory.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/mongodb.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/pyamqp.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/pyro.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/qpid.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/redis.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/sqlalchemy/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/sqlalchemy/models.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/virtual/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/virtual/exchange.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/virtual/scheduling.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/zmq.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/transport/zookeeper.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/utils/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/utils/amq_manager.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/utils/compat.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/utils/debug.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/utils/encoding.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/utils/eventio.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/utils/functional.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/utils/limits.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/utils/text.py create mode 100644 thesisenv/lib/python3.6/site-packages/kombu/utils/url.py create mode 100644 thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/INSTALLER rename thesisenv/lib/python3.6/site-packages/{pip-18.0.dist-info => pip-18.1.dist-info}/LICENSE.txt (100%) rename thesisenv/lib/python3.6/site-packages/{pip-18.0.dist-info => pip-18.1.dist-info}/METADATA (97%) rename thesisenv/lib/python3.6/site-packages/{pip-18.0.dist-info => pip-18.1.dist-info}/RECORD (81%) create mode 100644 thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/WHEEL rename thesisenv/lib/python3.6/site-packages/{pip-18.0.dist-info => pip-18.1.dist-info}/entry_points.txt (100%) create mode 100644 thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/top_level.txt create mode 100644 thesisenv/lib/python3.6/site-packages/pip/_internal/cli/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/pip/_internal/cli/autocompletion.py rename thesisenv/lib/python3.6/site-packages/pip/_internal/{basecommand.py => cli/base_command.py} (93%) rename thesisenv/lib/python3.6/site-packages/pip/_internal/{ => cli}/cmdoptions.py (81%) create mode 100644 thesisenv/lib/python3.6/site-packages/pip/_internal/cli/main_parser.py rename thesisenv/lib/python3.6/site-packages/pip/_internal/{baseparser.py => cli/parser.py} (89%) rename thesisenv/lib/python3.6/site-packages/pip/_internal/{ => cli}/status_codes.py (100%) create mode 100644 
thesisenv/lib/python3.6/site-packages/pip/_internal/models/candidate.py create mode 100644 thesisenv/lib/python3.6/site-packages/pip/_internal/models/format_control.py create mode 100644 thesisenv/lib/python3.6/site-packages/pip/_internal/models/link.py create mode 100644 thesisenv/lib/python3.6/site-packages/pip/_internal/pyproject.py create mode 100644 thesisenv/lib/python3.6/site-packages/pip/_internal/req/constructors.py rename thesisenv/lib/python3.6/site-packages/pip/_internal/{ => utils}/compat.py (96%) create mode 100644 thesisenv/lib/python3.6/site-packages/pip/_internal/utils/models.py create mode 100644 thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/__init__.py create mode 100644 thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/_in_process.py create mode 100644 thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/check.py create mode 100644 thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/colorlog.py create mode 100644 thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/compat.py create mode 100644 thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/envbuild.py create mode 100644 thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/wrappers.py create mode 100644 thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/INSTALLER create mode 100644 thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/LICENSE.txt create mode 100644 thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/METADATA create mode 100644 thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/RECORD create mode 100644 thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/WHEEL create mode 100644 thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/top_level.txt create mode 100644 thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/zip-safe diff --git a/application/__init__.py b/application/__init__.py index e69de29..1bf50a5 100644 --- a/application/__init__.py +++ b/application/__init__.py @@ -0,0 +1,5 @@ +from __future__ import absolute_import, unicode_literals +# This will make sure celery is always imported when +# Django starts so that shared_task will use this app. 
+from .celeryapp import app as celery_app +__all__ = ['celery_app'] \ No newline at end of file diff --git a/application/admin.py b/application/admin.py index 544ec6f..02ff7b2 100644 --- a/application/admin.py +++ b/application/admin.py @@ -4,6 +4,8 @@ from django.contrib.auth.admin import UserAdmin as BaseUserAdmin from django.contrib.auth.models import User from .models import Post, CustomUser +from .models import ScheduledReport, ReportRecipient, ScheduledReportGroup +from .forms import ScheduledReportForm class CustomUserInline(admin.StackedInline): @@ -20,3 +22,43 @@ admin.site.unregister(User) admin.site.register(User, UserAdmin) admin.site.register(Post) + + +class ReportRecipientAdmin(admin.TabularInline): + model = ReportRecipient +class ScheduledReportAdmin(admin.ModelAdmin): + """ + List display for Scheduled reports in Django admin + """ + model = ScheduledReport + list_display = ('id', 'get_recipients') + inlines = [ + ReportRecipientAdmin + ] + form = ScheduledReportForm + def get_recipients(self, model): + recipients = model.reportrecep.all().values_list('email', flat=True) + if not recipients: + return 'No recipients added' + recipient_list = '' + for recipient in recipients: + recipient_list = recipient_list + recipient + ', ' + return recipient_list[:-2] + get_recipients.short_description = 'Recipients' + get_recipients.allow_tags = True +class ScheduledReportGroupAdmin(admin.ModelAdmin): + """ + List display for ScheduledReportGroup Admin + """ + model = ScheduledReportGroup + list_display = ('get_scheduled_report_name','get_report_name') + def get_scheduled_report_name(self, model): + return model.scheduled_report.subject + def get_report_name(self, model): + return model.report.name + get_scheduled_report_name.short_description = "Scheduled Report Name" + get_report_name.short_description = "Report Name" + show_change_link = True + get_report_name.allow_tags = True +admin.site.register(ScheduledReport, ScheduledReportAdmin) +admin.site.register(ScheduledReportGroup, ScheduledReportGroupAdmin) \ No newline at end of file diff --git a/application/celeryapp.py b/application/celeryapp.py new file mode 100644 index 0000000..7848051 --- /dev/null +++ b/application/celeryapp.py @@ -0,0 +1,13 @@ +from __future__ import absolute_import +import os +from celery import Celery +# set the default Django settings module for the 'celery' program. +os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'application.settings') +from django.conf import settings + +app = Celery('application') +# Using a string here means the worker don't have to serialize +# the configuration object to child processes. +app.config_from_object('django.conf:settings') +# Load task modules from all registered Django app configs. 
+app.autodiscover_tasks(lambda: settings.INSTALLED_APPS) \ No newline at end of file diff --git a/application/email_service.py b/application/email_service.py new file mode 100644 index 0000000..574dede --- /dev/null +++ b/application/email_service.py @@ -0,0 +1,57 @@ +from datetime import datetime, timedelta +from django.core.mail import send_mail +from django.template import Template, Context +from django.http import HttpResponse +from django.conf import settings +from .models import ScheduledReport, ScheduledReportGroup, ReportRecipient +class ScheduledReportConfig(object): + def __init__(self, scheduled_report): + """ + Expects a scheduled report object and inititializes + its own scheduled_report attribute with it + """ + self.scheduled_report = scheduled_report + def get_report_config(self): + """ + Returns the configuration related to a scheduled report, needed + to populate the email + """ + return { + "template_context": self._get_related_reports_data(), + "recipients": self._get_report_recipients() + } + def _get_related_reports_data(self): + """ + Returns the list of reports data which needs to be sent out in a scheduled report + """ + pass + def _get_report_recipients(self): + """ + Returns the recipient list for a scheduled report + """ + pass +def create_email_data(content=None): + content = ''' + + + ''' + str(content) + '''''' + return content +def send_emails(): + current_time = datetime.utcnow() + scheduled_reports = ScheduledReport.objects.filter(next_run_at__lt = current_time) + for scheduled_report in scheduled_reports: + report_config = ScheduledReportConfig(scheduled_report).get_report_config() + """ Specify the template path you want to send out in the email. """ + template = Template(create_email_data('path/to/your/email_template.html')) + """ //Create your email html using Django's context processor """ + report_template = template.render(Context(report_config['template_context'])) + scheduled_report.save() + if not scheduled_report.subject: + """ Handle exception for subject not provided """ + if not report_config['recipients']: + """ Handle exception for recipients not provided """ + send_mail( + scheduled_report.subject, 'Here is the message.', + settings.EMAIL_HOST_USER, report_config['recipients'], + fail_silently=False, html_message=report_template + ) \ No newline at end of file diff --git a/application/forms.py b/application/forms.py index 3311509..188d7ce 100644 --- a/application/forms.py +++ b/application/forms.py @@ -1,8 +1,16 @@ from django import forms +from datetime import datetime + from .models import Post, CustomUser +from django.forms import ModelForm, ValidationError from taggit.forms import * from django.contrib.auth.forms import UserCreationForm, UserChangeForm +from datetime import datetime +from croniter import croniter +from django.forms import ModelForm, ValidationError +from .models import ScheduledReport + class PostForm(forms.ModelForm): class Meta: model = Post @@ -13,3 +21,27 @@ class NewTagForm(forms.ModelForm): class Meta: model = CustomUser fields = ['m_tags'] + + +class ScheduledReportForm(ModelForm): + class Meta: + model = ScheduledReport + fields = ['subject', 'cron_expression'] + fields = ['subject', 'cron_expression'] + help_texts = {'cron_expression': 'Scheduled time is considered in UTC'} + def clean(self): + cleaned_data = super(ScheduledReportForm, self).clean() + cron_expression = cleaned_data.get("cron_expression") + try: + iter = croniter(cron_expression, datetime.now()) + except: + raise ValidationError("Incorrect cron 
expression:\ + The information you must include is (in order of appearance):\ + A number (or list of numbers, or range of numbers), m, representing the minute of the hour\ + A number (or list of numbers, or range of numbers), h, representing the hour of the day\ + A number (or list of numbers, or range of numbers), dom, representing the day of the month\ + A number (or list, or range), or name (or list of names), mon, representing the month of the year\ + A number (or list, or range), or name (or list of names), dow, representing the day of the week\ + The asterisks (*) in our entry tell cron that for that unit of time, the job should be run every.\ + Eg. */5 * * * * cron for executing every 5 mins") + return cleaned_data \ No newline at end of file diff --git a/application/migrations/0006_auto_20181021_1347.py b/application/migrations/0006_auto_20181021_1347.py new file mode 100644 index 0000000..eb84f59 --- /dev/null +++ b/application/migrations/0006_auto_20181021_1347.py @@ -0,0 +1,51 @@ +# Generated by Django 2.1 on 2018-10-21 11:47 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('application', '0005_auto_20181019_1645'), + ] + + operations = [ + migrations.CreateModel( + name='Report', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('report_text', models.TextField()), + ], + ), + migrations.CreateModel( + name='ReportRecipient', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('email', models.EmailField(max_length=254)), + ], + ), + migrations.CreateModel( + name='ScheduledReport', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('subject', models.CharField(max_length=200)), + ('last_run_at', models.DateTimeField(blank=True, null=True)), + ('next_run_at', models.DateTimeField(blank=True, null=True)), + ('cron_expression', models.CharField(max_length=200)), + ], + ), + migrations.CreateModel( + name='ScheduledReportGroup', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('report', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='report', to='application.Report')), + ('scheduled_report', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='relatedscheduledreport', to='application.ScheduledReport')), + ], + ), + migrations.AddField( + model_name='reportrecipient', + name='scheduled_report', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reportrecep', to='application.ScheduledReport'), + ), + ] diff --git a/application/models.py b/application/models.py index 54500f5..1f1738a 100644 --- a/application/models.py +++ b/application/models.py @@ -2,6 +2,9 @@ from django.db import models from django.contrib.auth.models import User from django.utils import timezone from taggit.managers import TaggableManager +from datetime import datetime +from croniter import croniter + class CustomUser(models.Model): @@ -24,3 +27,39 @@ class Post(models.Model): def __str__(self): return self.title + +class Report(models.Model): + report_text = models.TextField() + +class ScheduledReport(models.Model): + """ + Contains email subject and cron expression,to evaluate when the email has to be sent + """ + subject = models.CharField(max_length=200) + last_run_at = 
models.DateTimeField(null=True, blank=True) + next_run_at = models.DateTimeField(null=True, blank=True) + cron_expression = models.CharField(max_length=200) + def save(self, *args, **kwargs): + """ + function to evaluate "next_run_at" using the cron expression, so that it is updated once the report is sent. + """ + self.last_run_at = datetime.now() + iter = croniter(self.cron_expression, self.last_run_at) + self.next_run_at = iter.get_next(datetime) + super(ScheduledReport, self).save(*args, **kwargs) + def __unicode__(self): + return self.subject + +class ScheduledReportGroup(models.Model): + """ + Many to many mapping between reports which will be sent out in a scheduled report + """ + report = models.ForeignKey(Report, related_name='report', on_delete=models.CASCADE) + scheduled_report = models.ForeignKey(ScheduledReport, + related_name='relatedscheduledreport', on_delete=models.CASCADE) +class ReportRecipient(models.Model): + """ + Stores all the recipients of the given scheduled report + """ + email = models.EmailField() + scheduled_report = models.ForeignKey(ScheduledReport, related_name='reportrecep', on_delete=models.CASCADE) \ No newline at end of file diff --git a/application/tasks.py b/application/tasks.py new file mode 100644 index 0000000..ccc572f --- /dev/null +++ b/application/tasks.py @@ -0,0 +1,7 @@ +from celery.task.schedules import crontab +from celery.decorators import periodic_task +from .email_service import send_emails +# this will run every minute, see http://celeryproject.org/docs/reference/celery.task.schedules.html#celery.task.schedules.crontab +@periodic_task(run_every=crontab(hour="*", minute="*", day_of_week="*")) +def trigger_emails(): + send_emails() \ No newline at end of file diff --git a/application/templates/tag_list.html b/application/templates/tag_list.html index 4cd467b..041f4ef 100644 --- a/application/templates/tag_list.html +++ b/application/templates/tag_list.html @@ -1,23 +1,24 @@ -{% extends "base.html" %} {% block content %} {% load taggit_templatetags2_tags %} +{% extends "base.html" %} {% block content %} - -
- Your tags:
- {{ u }} {{ arr }}
- from List: {% for tag in tags %}
- {% for tag in posts %}
- {{ tag.name }}
- {% endfor %}{% endfor %}
+{% for post in posts %}
+ {{ post.published_date }}
+ {{ post.title }}
+ {{ post.text|linebreaks }}
+ Tags: {% for tag in post.tags.all %}
+ {{ tag.name }}
+ {% if not forloop.last %}, {% endif %} {% endfor %}
+ {{ post.author }}
+ {% endfor %}
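For reference, the cron handling added earlier in this patch (the clean() validation in application/forms.py and ScheduledReport.save() in application/models.py) relies on croniter's get_next() to compute the next run time. A minimal sketch of that behaviour follows; the "*/5 * * * *" expression and the start date are illustrative values only, not taken from the patch:

from datetime import datetime
from croniter import croniter

# "*/5 * * * *" means "every five minutes"; the start time is an arbitrary example
start = datetime(2018, 10, 22, 18, 0)
schedule = croniter('*/5 * * * *', start)
print(schedule.get_next(datetime))   # 2018-10-22 18:05:00
print(schedule.get_next(datetime))   # 2018-10-22 18:10:00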
{% endblock %} \ No newline at end of file diff --git a/application/views.py b/application/views.py index 79bff29..f792e68 100644 --- a/application/views.py +++ b/application/views.py @@ -146,13 +146,15 @@ def student_page(request): @login_required def tag_list(request): log = logging.getLogger('mysite') - u = CustomUser.objects.get(user=request.user) - log.info(u) - tags = Tag.objects.filter(customuser__user = u) - log.info(tags) + u = User.objects.get(username=request.user) + if u: + tags_user = Tag.objects.filter(customuser__user = u) + log.info(tags_user) arr = [] - for tag in tags: + for tag in tags_user: arr.append(str(tag)) + log.info(tag) + posts = Post.objects.filter(tags__in=[tag]).order_by('-published_date') return render(request, 'tag_list.html', locals()) class TagSearch(TagCanvasListView): diff --git a/croniter b/croniter new file mode 160000 index 0000000..3273cbc --- /dev/null +++ b/croniter @@ -0,0 +1 @@ +Subproject commit 3273cbc777423138f9b6cfb127de503b63ccd22f diff --git a/log.txt b/log.txt index 7990e1f..bf3dfd3 100644 --- a/log.txt +++ b/log.txt @@ -138,3 +138,180 @@ [21/Oct/2018 10:35:50] INFO [mysite:184] taggit [21/Oct/2018 10:35:55] INFO [mysite:184] [21/Oct/2018 10:38:17] INFO [mysite:184] None +[21/Oct/2018 12:59:59] INFO [mysite:184] None +[22/Oct/2018 14:31:54] INFO [mysite:184] None +[22/Oct/2018 14:32:03] INFO [mysite:150] CustomUser object (25) +[22/Oct/2018 14:40:08] INFO [mysite:150] CustomUser object (25) +[22/Oct/2018 14:43:18] INFO [mysite:150] CustomUser object (25) +[22/Oct/2018 14:43:31] INFO [mysite:150] CustomUser object (25) +[22/Oct/2018 14:50:32] INFO [mysite:150] esthi +[22/Oct/2018 14:50:32] INFO [mysite:152] , ]> +[22/Oct/2018 14:51:10] INFO [mysite:150] stefan +[22/Oct/2018 14:51:10] INFO [mysite:152] ]> +[22/Oct/2018 14:56:36] INFO [mysite:150] stefan +[22/Oct/2018 14:56:36] INFO [mysite:152] ]> +[22/Oct/2018 14:56:56] INFO [mysite:150] stefan +[22/Oct/2018 14:56:56] INFO [mysite:152] ]> +[22/Oct/2018 14:57:15] INFO [mysite:150] stefan +[22/Oct/2018 14:57:15] INFO [mysite:152] ]> +[22/Oct/2018 14:57:39] INFO [mysite:150] stefan +[22/Oct/2018 14:57:39] INFO [mysite:152] ]> +[22/Oct/2018 14:58:53] INFO [mysite:150] stefan +[22/Oct/2018 14:58:53] INFO [mysite:152] ]> +[22/Oct/2018 14:59:01] INFO [mysite:150] stefan +[22/Oct/2018 14:59:01] INFO [mysite:152] ]> +[22/Oct/2018 14:59:55] INFO [mysite:150] stefan +[22/Oct/2018 14:59:55] INFO [mysite:152] ]> +[22/Oct/2018 15:02:16] INFO [mysite:150] stefan +[22/Oct/2018 15:02:16] INFO [mysite:152] ]> +[22/Oct/2018 15:03:09] INFO [mysite:150] stefan +[22/Oct/2018 15:03:09] INFO [mysite:152] ]> +[22/Oct/2018 15:03:41] INFO [mysite:150] stefan +[22/Oct/2018 15:03:41] INFO [mysite:152] ]> +[22/Oct/2018 15:05:17] INFO [mysite:184] None +[22/Oct/2018 15:05:21] INFO [mysite:184] hi +[22/Oct/2018 15:07:51] INFO [mysite:185] None +[22/Oct/2018 15:07:53] INFO [mysite:150] stefan +[22/Oct/2018 15:07:53] INFO [mysite:152] ]> +[22/Oct/2018 15:08:37] INFO [mysite:150] stefan +[22/Oct/2018 15:08:37] INFO [mysite:152] ]> +[22/Oct/2018 15:09:08] INFO [mysite:151] stefan +[22/Oct/2018 15:09:08] INFO [mysite:153] ]> +[22/Oct/2018 15:09:58] INFO [mysite:151] stefan +[22/Oct/2018 15:09:58] INFO [mysite:153] ]> +[22/Oct/2018 15:11:00] INFO [mysite:151] stefan +[22/Oct/2018 15:11:00] INFO [mysite:153] ]> +[22/Oct/2018 15:11:41] INFO [mysite:151] stefan +[22/Oct/2018 15:11:41] INFO [mysite:153] ]> +[22/Oct/2018 15:12:14] INFO [mysite:151] stefan +[22/Oct/2018 15:12:14] INFO [mysite:153] ]> +[22/Oct/2018 15:14:15] INFO 
[mysite:151] stefan +[22/Oct/2018 15:14:15] INFO [mysite:153] ]> +[22/Oct/2018 17:37:38] INFO [mysite:154] ]> +[22/Oct/2018 17:38:19] INFO [mysite:154] , ]> +[22/Oct/2018 17:39:44] INFO [mysite:151] , , ]> +[22/Oct/2018 17:39:44] INFO [mysite:155] , ]> +[22/Oct/2018 17:41:03] INFO [mysite:151] , , ]> +[22/Oct/2018 17:41:19] INFO [mysite:151] , , ]> +[22/Oct/2018 17:41:20] INFO [mysite:155] , ]> +[22/Oct/2018 17:45:07] INFO [mysite:151] , , ]> +[22/Oct/2018 17:45:07] INFO [mysite:155] , ]> +[22/Oct/2018 17:45:32] INFO [mysite:151] , , ]> +[22/Oct/2018 17:45:32] INFO [mysite:155] , ]> +[22/Oct/2018 17:45:47] INFO [mysite:151] , , ]> +[22/Oct/2018 17:45:47] INFO [mysite:155] , ]> +[22/Oct/2018 17:45:47] INFO [mysite:160] ['bamberg'] +[22/Oct/2018 17:45:47] INFO [mysite:160] ['bamberg', 'test'] +[22/Oct/2018 17:49:21] INFO [mysite:151] , , ]> +[22/Oct/2018 17:49:21] INFO [mysite:155] , ]> +[22/Oct/2018 17:49:21] INFO [mysite:159] ['bamberg'] +[22/Oct/2018 17:49:21] INFO [mysite:159] ['bamberg', 'test'] +[22/Oct/2018 17:51:20] INFO [mysite:151] , , ]> +[22/Oct/2018 17:51:20] INFO [mysite:155] , ]> +[22/Oct/2018 17:51:20] INFO [mysite:159] ['bamberg'] +[22/Oct/2018 17:51:20] INFO [mysite:159] ['bamberg', 'test'] +[22/Oct/2018 17:51:27] INFO [mysite:151] , , ]> +[22/Oct/2018 17:51:27] INFO [mysite:155] , ]> +[22/Oct/2018 17:51:28] INFO [mysite:159] ['bamberg'] +[22/Oct/2018 17:51:28] INFO [mysite:159] ['bamberg', 'test'] +[22/Oct/2018 17:53:13] INFO [mysite:151] , , ]> +[22/Oct/2018 17:53:55] INFO [mysite:151] , , ]> +[22/Oct/2018 17:53:55] INFO [mysite:154] , ]> +[22/Oct/2018 17:53:55] INFO [mysite:158] ['bamberg'] +[22/Oct/2018 17:53:55] INFO [mysite:158] ['bamberg', 'test'] +[22/Oct/2018 17:54:44] INFO [mysite:151] , , ]> +[22/Oct/2018 17:54:44] INFO [mysite:154] , ]> +[22/Oct/2018 17:55:02] INFO [mysite:151] , , ]> +[22/Oct/2018 17:55:02] INFO [mysite:154] , ]> +[22/Oct/2018 17:55:02] INFO [mysite:159] ['bamberg'] +[22/Oct/2018 17:55:02] INFO [mysite:159] ['bamberg', 'test'] +[22/Oct/2018 17:55:14] INFO [mysite:151] , , ]> +[22/Oct/2018 17:55:14] INFO [mysite:154] , ]> +[22/Oct/2018 17:55:14] INFO [mysite:159] +[22/Oct/2018 17:55:14] INFO [mysite:159] +[22/Oct/2018 17:56:12] INFO [mysite:151] , , ]> +[22/Oct/2018 17:56:12] INFO [mysite:154] , ]> +[22/Oct/2018 17:56:12] INFO [mysite:159] bamberg +[22/Oct/2018 17:56:12] INFO [mysite:159] test +[22/Oct/2018 17:56:34] INFO [mysite:151] , , ]> +[22/Oct/2018 17:56:34] INFO [mysite:154] , ]> +[22/Oct/2018 17:56:34] INFO [mysite:159] bamberg +[22/Oct/2018 17:56:34] INFO [mysite:159] test +[22/Oct/2018 17:57:48] INFO [mysite:151] , , ]> +[22/Oct/2018 17:57:48] INFO [mysite:154] , ]> +[22/Oct/2018 17:57:49] INFO [mysite:159] +[22/Oct/2018 17:57:49] INFO [mysite:159] +[22/Oct/2018 17:59:46] INFO [mysite:151] , , ]> +[22/Oct/2018 17:59:46] INFO [mysite:154] , ]> +[22/Oct/2018 17:59:46] INFO [mysite:159] , , ]> +[22/Oct/2018 17:59:46] INFO [mysite:159] , , ]> +[22/Oct/2018 18:00:06] INFO [mysite:151] , , ]> +[22/Oct/2018 18:00:06] INFO [mysite:154] , ]> +[22/Oct/2018 18:00:06] INFO [mysite:159] , , ]> +[22/Oct/2018 18:00:06] INFO [mysite:159] , , ]> +[22/Oct/2018 18:00:15] INFO [mysite:151] , , ]> +[22/Oct/2018 18:00:15] INFO [mysite:154] , ]> +[22/Oct/2018 18:00:15] INFO [mysite:159] , , ]> +[22/Oct/2018 18:00:15] INFO [mysite:159] , , ]> +[22/Oct/2018 18:01:25] INFO [mysite:153] , ]> +[22/Oct/2018 18:01:25] INFO [mysite:158] , , ]> +[22/Oct/2018 18:01:25] INFO [mysite:158] , , ]> +[22/Oct/2018 18:01:45] INFO [mysite:153] , ]> +[22/Oct/2018 18:02:47] 
INFO [mysite:153] , ]> +[22/Oct/2018 18:03:12] INFO [mysite:153] , ]> +[22/Oct/2018 18:03:12] INFO [mysite:158] +[22/Oct/2018 18:03:12] INFO [mysite:158] +[22/Oct/2018 18:04:45] INFO [mysite:153] , ]> +[22/Oct/2018 18:05:24] INFO [mysite:153] , ]> +[22/Oct/2018 18:05:24] INFO [mysite:157] bamberg +[22/Oct/2018 18:05:24] INFO [mysite:159] +[22/Oct/2018 18:05:24] INFO [mysite:157] test +[22/Oct/2018 18:05:24] INFO [mysite:159] +[22/Oct/2018 18:13:35] INFO [mysite:153] , ]> +[22/Oct/2018 18:13:35] INFO [mysite:157] bamberg +[22/Oct/2018 18:14:16] INFO [mysite:153] , ]> +[22/Oct/2018 18:14:16] INFO [mysite:157] bamberg +[22/Oct/2018 18:19:16] INFO [mysite:153] , ]> +[22/Oct/2018 18:19:16] INFO [mysite:157] bamberg +[22/Oct/2018 18:38:14] INFO [mysite:153] , ]> +[22/Oct/2018 18:38:14] INFO [mysite:157] bamberg +[22/Oct/2018 18:41:50] INFO [mysite:153] , ]> +[22/Oct/2018 18:41:50] INFO [mysite:157] bamberg +[22/Oct/2018 18:41:50] INFO [mysite:159] , , ]> +[22/Oct/2018 18:41:50] INFO [mysite:157] test +[22/Oct/2018 18:41:50] INFO [mysite:159] , , ]> +[22/Oct/2018 18:45:55] INFO [mysite:153] , ]> +[22/Oct/2018 18:45:55] INFO [mysite:157] bamberg +[22/Oct/2018 18:46:39] INFO [mysite:153] , ]> +[22/Oct/2018 18:46:39] INFO [mysite:157] bamberg +[22/Oct/2018 18:46:39] INFO [mysite:159] +[22/Oct/2018 18:46:39] INFO [mysite:157] test +[22/Oct/2018 18:46:39] INFO [mysite:159] , , ]> +[22/Oct/2018 18:47:12] INFO [mysite:153] ]> +[22/Oct/2018 18:47:12] INFO [mysite:157] stefan +[22/Oct/2018 18:47:12] INFO [mysite:159] +[22/Oct/2018 18:48:22] INFO [mysite:153] ]> +[22/Oct/2018 18:48:22] INFO [mysite:157] stefan +[22/Oct/2018 18:48:22] INFO [mysite:159] ]> +[22/Oct/2018 18:50:37] INFO [mysite:153] ]> +[22/Oct/2018 18:50:38] INFO [mysite:157] stefan +[22/Oct/2018 18:52:01] INFO [mysite:153] ]> +[22/Oct/2018 18:52:01] INFO [mysite:157] stefan +[22/Oct/2018 18:52:36] INFO [mysite:153] ]> +[22/Oct/2018 18:52:36] INFO [mysite:157] stefan +[22/Oct/2018 18:53:07] INFO [mysite:153] ]> +[22/Oct/2018 18:53:07] INFO [mysite:157] stefan +[22/Oct/2018 18:53:20] INFO [mysite:153] , ]> +[22/Oct/2018 18:53:20] INFO [mysite:157] bamberg +[22/Oct/2018 18:53:20] INFO [mysite:157] test +[22/Oct/2018 18:54:05] INFO [mysite:153] , ]> +[22/Oct/2018 18:54:05] INFO [mysite:157] bamberg +[22/Oct/2018 18:54:20] INFO [mysite:153] , ]> +[22/Oct/2018 18:54:21] INFO [mysite:157] bamberg +[22/Oct/2018 18:54:21] INFO [mysite:157] test +[22/Oct/2018 18:54:46] INFO [mysite:152] , ]> +[22/Oct/2018 18:54:46] INFO [mysite:156] bamberg +[22/Oct/2018 18:54:46] INFO [mysite:156] test +[22/Oct/2018 18:55:43] INFO [mysite:152] , ]> +[22/Oct/2018 18:55:43] INFO [mysite:156] bamberg +[22/Oct/2018 18:55:43] INFO [mysite:156] test diff --git a/mysite/settings.py b/mysite/settings.py index 69fe217..dc0e8b4 100644 --- a/mysite/settings.py +++ b/mysite/settings.py @@ -13,6 +13,7 @@ https://docs.djangoproject.com/en/2.0/ref/settings/ import os import re import socket +import djcelery # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) @@ -46,6 +47,8 @@ INSTALLED_APPS = [ 'application', 'taggit', 'taggit_templatetags2', + 'djcelery', + 'kombu.transport.django', ] MIDDLEWARE = [ @@ -171,7 +174,7 @@ else: ] print(" --- Live stage --- ") - +AUTH_PROFILE_MODULE = 'application.CustomUser' #Log Configuration LOGGING = { @@ -249,4 +252,13 @@ if DEBUG: DEBUG_TOOLBAR_CONFIG = { 'INTERCEPT_REDIRECTS': False, - } \ No newline at end of file + } + +# Celery settings +BROKER_URL = 'django://' +CELERY_ACCEPT_CONTENT = ['json'] +CELERY_TASK_SERIALIZER = 'json' +CELERY_RESULT_SERIALIZER = 'json' +CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend' +CELERYBEAT_SCHEDULER = "djcelery.schedulers.DatabaseScheduler" +djcelery.setup_loader() \ No newline at end of file diff --git a/thesisenv/bin/celery b/thesisenv/bin/celery new file mode 100755 index 0000000..c9a04ea --- /dev/null +++ b/thesisenv/bin/celery @@ -0,0 +1,11 @@ +#!/Users/Esthi/thesis_ek/thesisenv/bin/python3 + +# -*- coding: utf-8 -*- +import re +import sys + +from celery.__main__ import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/thesisenv/bin/celerybeat b/thesisenv/bin/celerybeat new file mode 100755 index 0000000..0814434 --- /dev/null +++ b/thesisenv/bin/celerybeat @@ -0,0 +1,11 @@ +#!/Users/Esthi/thesis_ek/thesisenv/bin/python3 + +# -*- coding: utf-8 -*- +import re +import sys + +from celery.__main__ import _compat_beat + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(_compat_beat()) diff --git a/thesisenv/bin/celeryd b/thesisenv/bin/celeryd new file mode 100755 index 0000000..2012d05 --- /dev/null +++ b/thesisenv/bin/celeryd @@ -0,0 +1,11 @@ +#!/Users/Esthi/thesis_ek/thesisenv/bin/python3 + +# -*- coding: utf-8 -*- +import re +import sys + +from celery.__main__ import _compat_worker + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(_compat_worker()) diff --git a/thesisenv/bin/celeryd-multi b/thesisenv/bin/celeryd-multi new file mode 100755 index 0000000..f15d4f0 --- /dev/null +++ b/thesisenv/bin/celeryd-multi @@ -0,0 +1,11 @@ +#!/Users/Esthi/thesis_ek/thesisenv/bin/python3 + +# -*- coding: utf-8 -*- +import re +import sys + +from celery.__main__ import _compat_multi + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(_compat_multi()) diff --git a/thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/DESCRIPTION.rst b/thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000..6da1c12 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/DESCRIPTION.rst @@ -0,0 +1,106 @@ +===================================================================== + Python AMQP 0.9.1 client library +===================================================================== + +:Version: 1.4.9 +:Web: http://amqp.readthedocs.org/ +:Download: http://pypi.python.org/pypi/amqp/ +:Source: http://github.com/celery/py-amqp/ +:Keywords: amqp, rabbitmq + +About +===== + +This is a fork of amqplib_ which was originally written by Barry Pederson. +It is maintained by the Celery_ project, and used by `kombu`_ as a pure python +alternative when `librabbitmq`_ is not available. + +This library should be API compatible with `librabbitmq`_. + +.. 
_amqplib: http://pypi.python.org/pypi/amqplib +.. _Celery: http://celeryproject.org/ +.. _kombu: http://kombu.readthedocs.org/ +.. _librabbitmq: http://pypi.python.org/pypi/librabbitmq + +Differences from `amqplib`_ +=========================== + +- Supports draining events from multiple channels (``Connection.drain_events``) +- Support for timeouts +- Channels are restored after channel error, instead of having to close the + connection. +- Support for heartbeats + + - ``Connection.heartbeat_tick(rate=2)`` must called at regular intervals + (half of the heartbeat value if rate is 2). + - Or some other scheme by using ``Connection.send_heartbeat``. +- Supports RabbitMQ extensions: + - Consumer Cancel Notifications + - by default a cancel results in ``ChannelError`` being raised + - but not if a ``on_cancel`` callback is passed to ``basic_consume``. + - Publisher confirms + - ``Channel.confirm_select()`` enables publisher confirms. + - ``Channel.events['basic_ack'].append(my_callback)`` adds a callback + to be called when a message is confirmed. This callback is then + called with the signature ``(delivery_tag, multiple)``. + - Exchange-to-exchange bindings: ``exchange_bind`` / ``exchange_unbind``. + - ``Channel.confirm_select()`` enables publisher confirms. + - ``Channel.events['basic_ack'].append(my_callback)`` adds a callback + to be called when a message is confirmed. This callback is then + called with the signature ``(delivery_tag, multiple)``. +- Support for ``basic_return`` +- Uses AMQP 0-9-1 instead of 0-8. + - ``Channel.access_request`` and ``ticket`` arguments to methods + **removed**. + - Supports the ``arguments`` argument to ``basic_consume``. + - ``internal`` argument to ``exchange_declare`` removed. + - ``auto_delete`` argument to ``exchange_declare`` deprecated + - ``insist`` argument to ``Connection`` removed. + - ``Channel.alerts`` has been removed. + - Support for ``Channel.basic_recover_async``. + - ``Channel.basic_recover`` deprecated. +- Exceptions renamed to have idiomatic names: + - ``AMQPException`` -> ``AMQPError`` + - ``AMQPConnectionException`` -> ConnectionError`` + - ``AMQPChannelException`` -> ChannelError`` + - ``Connection.known_hosts`` removed. + - ``Connection`` no longer supports redirects. + - ``exchange`` argument to ``queue_bind`` can now be empty + to use the "default exchange". +- Adds ``Connection.is_alive`` that tries to detect + whether the connection can still be used. +- Adds ``Connection.connection_errors`` and ``.channel_errors``, + a list of recoverable errors. +- Exposes the underlying socket as ``Connection.sock``. +- Adds ``Channel.no_ack_consumers`` to keep track of consumer tags + that set the no_ack flag. +- Slightly better at error recovery + +Further +======= + +- Differences between AMQP 0.8 and 0.9.1 + + http://www.rabbitmq.com/amqp-0-8-to-0-9-1.html + +- AMQP 0.9.1 Quick Reference + + http://www.rabbitmq.com/amqp-0-9-1-quickref.html + +- RabbitMQ Extensions + + http://www.rabbitmq.com/extensions.html + +- For more information about AMQP, visit + + http://www.amqp.org + +- For other Python client libraries see: + + http://www.rabbitmq.com/devtools.html#python-dev + +.. 
image:: https://d2weczhvl823v0.cloudfront.net/celery/celery/trend.png + :alt: Bitdeli badge + :target: https://bitdeli.com/free + + diff --git a/thesisenv/lib/python3.6/site-packages/pip-18.0.dist-info/INSTALLER b/thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/INSTALLER similarity index 100% rename from thesisenv/lib/python3.6/site-packages/pip-18.0.dist-info/INSTALLER rename to thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/INSTALLER diff --git a/thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/METADATA b/thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/METADATA new file mode 100644 index 0000000..42fb903 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/METADATA @@ -0,0 +1,130 @@ +Metadata-Version: 2.0 +Name: amqp +Version: 1.4.9 +Summary: Low-level AMQP client for Python (fork of amqplib) +Home-page: http://github.com/celery/py-amqp +Author: Ask Solem +Author-email: pyamqp@celeryproject.org +License: LGPL +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.0 +Classifier: Programming Language :: Python :: 3.1 +Classifier: Programming Language :: Python :: 3.2 +Classifier: Programming Language :: Python :: 3.3 +Classifier: License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL) +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent + +===================================================================== + Python AMQP 0.9.1 client library +===================================================================== + +:Version: 1.4.9 +:Web: http://amqp.readthedocs.org/ +:Download: http://pypi.python.org/pypi/amqp/ +:Source: http://github.com/celery/py-amqp/ +:Keywords: amqp, rabbitmq + +About +===== + +This is a fork of amqplib_ which was originally written by Barry Pederson. +It is maintained by the Celery_ project, and used by `kombu`_ as a pure python +alternative when `librabbitmq`_ is not available. + +This library should be API compatible with `librabbitmq`_. + +.. _amqplib: http://pypi.python.org/pypi/amqplib +.. _Celery: http://celeryproject.org/ +.. _kombu: http://kombu.readthedocs.org/ +.. _librabbitmq: http://pypi.python.org/pypi/librabbitmq + +Differences from `amqplib`_ +=========================== + +- Supports draining events from multiple channels (``Connection.drain_events``) +- Support for timeouts +- Channels are restored after channel error, instead of having to close the + connection. +- Support for heartbeats + + - ``Connection.heartbeat_tick(rate=2)`` must called at regular intervals + (half of the heartbeat value if rate is 2). + - Or some other scheme by using ``Connection.send_heartbeat``. +- Supports RabbitMQ extensions: + - Consumer Cancel Notifications + - by default a cancel results in ``ChannelError`` being raised + - but not if a ``on_cancel`` callback is passed to ``basic_consume``. + - Publisher confirms + - ``Channel.confirm_select()`` enables publisher confirms. + - ``Channel.events['basic_ack'].append(my_callback)`` adds a callback + to be called when a message is confirmed. This callback is then + called with the signature ``(delivery_tag, multiple)``. 
+ - Exchange-to-exchange bindings: ``exchange_bind`` / ``exchange_unbind``. + - ``Channel.confirm_select()`` enables publisher confirms. + - ``Channel.events['basic_ack'].append(my_callback)`` adds a callback + to be called when a message is confirmed. This callback is then + called with the signature ``(delivery_tag, multiple)``. +- Support for ``basic_return`` +- Uses AMQP 0-9-1 instead of 0-8. + - ``Channel.access_request`` and ``ticket`` arguments to methods + **removed**. + - Supports the ``arguments`` argument to ``basic_consume``. + - ``internal`` argument to ``exchange_declare`` removed. + - ``auto_delete`` argument to ``exchange_declare`` deprecated + - ``insist`` argument to ``Connection`` removed. + - ``Channel.alerts`` has been removed. + - Support for ``Channel.basic_recover_async``. + - ``Channel.basic_recover`` deprecated. +- Exceptions renamed to have idiomatic names: + - ``AMQPException`` -> ``AMQPError`` + - ``AMQPConnectionException`` -> ConnectionError`` + - ``AMQPChannelException`` -> ChannelError`` + - ``Connection.known_hosts`` removed. + - ``Connection`` no longer supports redirects. + - ``exchange`` argument to ``queue_bind`` can now be empty + to use the "default exchange". +- Adds ``Connection.is_alive`` that tries to detect + whether the connection can still be used. +- Adds ``Connection.connection_errors`` and ``.channel_errors``, + a list of recoverable errors. +- Exposes the underlying socket as ``Connection.sock``. +- Adds ``Channel.no_ack_consumers`` to keep track of consumer tags + that set the no_ack flag. +- Slightly better at error recovery + +Further +======= + +- Differences between AMQP 0.8 and 0.9.1 + + http://www.rabbitmq.com/amqp-0-8-to-0-9-1.html + +- AMQP 0.9.1 Quick Reference + + http://www.rabbitmq.com/amqp-0-9-1-quickref.html + +- RabbitMQ Extensions + + http://www.rabbitmq.com/extensions.html + +- For more information about AMQP, visit + + http://www.amqp.org + +- For other Python client libraries see: + + http://www.rabbitmq.com/devtools.html#python-dev + +.. 
image:: https://d2weczhvl823v0.cloudfront.net/celery/celery/trend.png + :alt: Bitdeli badge + :target: https://bitdeli.com/free + + diff --git a/thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/RECORD b/thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/RECORD new file mode 100644 index 0000000..5c3456e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/RECORD @@ -0,0 +1,37 @@ +amqp/__init__.py,sha256=BeETUDbn4gfRRlDZLbYR87Baj4OfZhhhw7zPw8BcJ34,2126 +amqp/abstract_channel.py,sha256=CzinrOqXSVnzVpH5Cdm_zYJaW929AGqeQfRw-PMX74s,3429 +amqp/basic_message.py,sha256=owh1E_CBDfu99xKDD4nDFDRf5aHkoIjU3KH8iDd7PWM,3954 +amqp/channel.py,sha256=xldCRKo4Jzri8ryLlaozKa5Vp7B-KIilDzfhXKyCjbE,84236 +amqp/connection.py,sha256=SGqZ4aYMwpy8C8-WG2XZZ5Vsgxog7dDN57k2UTMV8ck,34235 +amqp/exceptions.py,sha256=ywAWGUJbSDpcKpvLgddmu2j4N1nvLWeMtaJIdlZ8TyQ,6852 +amqp/five.py,sha256=-KE33qs2B6f9N4PAby-zb6VqQu0UEPgKELjZl-8sx6E,5457 +amqp/method_framing.py,sha256=wP9XRw3cL0WXLAC7DpdK2HseTikK3vVm20IS0VYzbTw,8051 +amqp/protocol.py,sha256=luFIgRWsD0vy3pupwiSJBaxWvARKTOSm9DrHuAwzk60,310 +amqp/serialization.py,sha256=exC7GNCivp4B_5bzui2a-Swlb1MGfKmqnmlB3Jc9xSs,16315 +amqp/transport.py,sha256=jjZjSQYRfCmMa8Nba9E-NNLvjWkNix2HYFNJlF00KhQ,10020 +amqp/utils.py,sha256=NaBiCf_ZllC7wVYZ0yAcd_uJcmi5UDRD_w3PGVAS9M4,2685 +amqp/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +amqp/tests/case.py,sha256=yLcb_0hCb74IuK2b57kP0z8eAEjKvhKALZoi-JSQUmY,1973 +amqp/tests/test_channel.py,sha256=xZGBSd9UhfuxqWmXRHy_m1zCGQVguUDoVL00EXLUmVg,1087 +amqp-1.4.9.dist-info/DESCRIPTION.rst,sha256=ayW656JUsSRXWw0dojo6CW7PJ6wHqWyd98YASJOnd2M,4028 +amqp-1.4.9.dist-info/METADATA,sha256=4yS7juxlaSN_UIjVpa7sD3Of86aCk_2V98LEJgOm3IM,4994 +amqp-1.4.9.dist-info/metadata.json,sha256=hIn8inTFt5lK65sSohqxv_qmsblygeJGtaIVh4tRYV4,1175 +amqp-1.4.9.dist-info/RECORD,, +amqp-1.4.9.dist-info/top_level.txt,sha256=tWQNmFVhU4UtDgB6Yy2lKqRz7LtOrRcN8_bPFVcVVR8,5 +amqp-1.4.9.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110 +amqp-1.4.9.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +amqp/tests/__pycache__/case.cpython-36.pyc,, +amqp/tests/__pycache__/test_channel.cpython-36.pyc,, +amqp/tests/__pycache__/__init__.cpython-36.pyc,, +amqp/__pycache__/abstract_channel.cpython-36.pyc,, +amqp/__pycache__/exceptions.cpython-36.pyc,, +amqp/__pycache__/connection.cpython-36.pyc,, +amqp/__pycache__/channel.cpython-36.pyc,, +amqp/__pycache__/five.cpython-36.pyc,, +amqp/__pycache__/basic_message.cpython-36.pyc,, +amqp/__pycache__/transport.cpython-36.pyc,, +amqp/__pycache__/utils.cpython-36.pyc,, +amqp/__pycache__/method_framing.cpython-36.pyc,, +amqp/__pycache__/serialization.cpython-36.pyc,, +amqp/__pycache__/__init__.cpython-36.pyc,, +amqp/__pycache__/protocol.cpython-36.pyc,, diff --git a/thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/WHEEL b/thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/WHEEL new file mode 100644 index 0000000..9dff69d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.24.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/metadata.json b/thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/metadata.json new file mode 100644 index 0000000..ba647b5 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/metadata.json @@ -0,0 +1 @@ 
+{"license": "LGPL", "name": "amqp", "metadata_version": "2.0", "generator": "bdist_wheel (0.24.0)", "test_requires": [{"requires": ["unittest2 (>=0.4.0)", "nose", "nose-cover3", "coverage (>=3.0)", "mock"]}], "summary": "Low-level AMQP client for Python (fork of amqplib)", "platform": "any", "version": "1.4.9", "extensions": {"python.details": {"project_urls": {"Home": "http://github.com/celery/py-amqp"}, "document_names": {"description": "DESCRIPTION.rst"}, "contacts": [{"role": "author", "email": "pyamqp@celeryproject.org", "name": "Ask Solem"}]}}, "classifiers": ["Development Status :: 5 - Production/Stable", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.0", "Programming Language :: Python :: 3.1", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent"]} \ No newline at end of file diff --git a/thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/top_level.txt b/thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/top_level.txt new file mode 100644 index 0000000..5e610d3 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/amqp-1.4.9.dist-info/top_level.txt @@ -0,0 +1 @@ +amqp diff --git a/thesisenv/lib/python3.6/site-packages/amqp/__init__.py b/thesisenv/lib/python3.6/site-packages/amqp/__init__.py new file mode 100644 index 0000000..9c39c43 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/amqp/__init__.py @@ -0,0 +1,70 @@ +"""Low-level AMQP client for Python (fork of amqplib)""" +# Copyright (C) 2007-2008 Barry Pederson +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 +from __future__ import absolute_import + +VERSION = (1, 4, 9) +__version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:]) +__author__ = 'Barry Pederson' +__maintainer__ = 'Ask Solem' +__contact__ = 'pyamqp@celeryproject.org' +__homepage__ = 'http://github.com/celery/py-amqp' +__docformat__ = 'restructuredtext' + +# -eof meta- + +# +# Pull in the public items from the various sub-modules +# +from .basic_message import Message # noqa +from .channel import Channel # noqa +from .connection import Connection # noqa +from .exceptions import ( # noqa + AMQPError, + ConnectionError, + RecoverableConnectionError, + IrrecoverableConnectionError, + ChannelError, + RecoverableChannelError, + IrrecoverableChannelError, + ConsumerCancelled, + ContentTooLarge, + NoConsumers, + ConnectionForced, + InvalidPath, + AccessRefused, + NotFound, + ResourceLocked, + PreconditionFailed, + FrameError, + FrameSyntaxError, + InvalidCommand, + ChannelNotOpen, + UnexpectedFrame, + ResourceError, + NotAllowed, + AMQPNotImplementedError, + InternalError, + error_for_code, + __all__ as _all_exceptions, +) +from .utils import promise # noqa + +__all__ = [ + 'Connection', + 'Channel', + 'Message', +] + _all_exceptions diff --git a/thesisenv/lib/python3.6/site-packages/amqp/abstract_channel.py b/thesisenv/lib/python3.6/site-packages/amqp/abstract_channel.py new file mode 100644 index 0000000..62fca89 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/amqp/abstract_channel.py @@ -0,0 +1,93 @@ +"""Code common to Connection and Channel objects.""" +# Copyright (C) 2007-2008 Barry Pederson ) +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 +from __future__ import absolute_import + +from .exceptions import AMQPNotImplementedError, RecoverableConnectionError +from .serialization import AMQPWriter + +__all__ = ['AbstractChannel'] + + +class AbstractChannel(object): + """Superclass for both the Connection, which is treated + as channel 0, and other user-created Channel objects. + + The subclasses must have a _METHOD_MAP class property, mapping + between AMQP method signatures and Python methods. 
+ + """ + def __init__(self, connection, channel_id): + self.connection = connection + self.channel_id = channel_id + connection.channels[channel_id] = self + self.method_queue = [] # Higher level queue for methods + self.auto_decode = False + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + + def _send_method(self, method_sig, args=bytes(), content=None): + """Send a method for our channel.""" + conn = self.connection + if conn is None: + raise RecoverableConnectionError('connection already closed') + + if isinstance(args, AMQPWriter): + args = args.getvalue() + + conn.method_writer.write_method( + self.channel_id, method_sig, args, content, + ) + + def close(self): + """Close this Channel or Connection""" + raise NotImplementedError('Must be overriden in subclass') + + def wait(self, allowed_methods=None, timeout=None): + """Wait for a method that matches our allowed_methods parameter (the + default value of None means match any method), and dispatch to it.""" + method_sig, args, content = self.connection._wait_method( + self.channel_id, allowed_methods, timeout) + + return self.dispatch_method(method_sig, args, content) + + def dispatch_method(self, method_sig, args, content): + if content and \ + self.auto_decode and \ + hasattr(content, 'content_encoding'): + try: + content.body = content.body.decode(content.content_encoding) + except Exception: + pass + + try: + amqp_method = self._METHOD_MAP[method_sig] + except KeyError: + raise AMQPNotImplementedError( + 'Unknown AMQP method {0!r}'.format(method_sig)) + + if content is None: + return amqp_method(self, args) + else: + return amqp_method(self, args, content) + + #: Placeholder, the concrete implementations will have to + #: supply their own versions of _METHOD_MAP + _METHOD_MAP = {} diff --git a/thesisenv/lib/python3.6/site-packages/amqp/basic_message.py b/thesisenv/lib/python3.6/site-packages/amqp/basic_message.py new file mode 100644 index 0000000..192ede9 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/amqp/basic_message.py @@ -0,0 +1,124 @@ +"""Messages for AMQP""" +# Copyright (C) 2007-2008 Barry Pederson +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 +from __future__ import absolute_import + +from .serialization import GenericContent + +__all__ = ['Message'] + + +class Message(GenericContent): + """A Message for use with the Channnel.basic_* methods.""" + + #: Instances of this class have these attributes, which + #: are passed back and forth as message properties between + #: client and server + PROPERTIES = [ + ('content_type', 'shortstr'), + ('content_encoding', 'shortstr'), + ('application_headers', 'table'), + ('delivery_mode', 'octet'), + ('priority', 'octet'), + ('correlation_id', 'shortstr'), + ('reply_to', 'shortstr'), + ('expiration', 'shortstr'), + ('message_id', 'shortstr'), + ('timestamp', 'timestamp'), + ('type', 'shortstr'), + ('user_id', 'shortstr'), + ('app_id', 'shortstr'), + ('cluster_id', 'shortstr') + ] + + def __init__(self, body='', children=None, channel=None, **properties): + """Expected arg types + + body: string + children: (not supported) + + Keyword properties may include: + + content_type: shortstr + MIME content type + + content_encoding: shortstr + MIME content encoding + + application_headers: table + Message header field table, a dict with string keys, + and string | int | Decimal | datetime | dict values. + + delivery_mode: octet + Non-persistent (1) or persistent (2) + + priority: octet + The message priority, 0 to 9 + + correlation_id: shortstr + The application correlation identifier + + reply_to: shortstr + The destination to reply to + + expiration: shortstr + Message expiration specification + + message_id: shortstr + The application message identifier + + timestamp: datetime.datetime + The message timestamp + + type: shortstr + The message type name + + user_id: shortstr + The creating user id + + app_id: shortstr + The creating application id + + cluster_id: shortstr + Intra-cluster routing identifier + + Unicode bodies are encoded according to the 'content_encoding' + argument. If that's None, it's set to 'UTF-8' automatically. + + example:: + + msg = Message('hello world', + content_type='text/plain', + application_headers={'foo': 7}) + + """ + super(Message, self).__init__(**properties) + self.body = body + self.channel = channel + + def __eq__(self, other): + """Check if the properties and bodies of this Message and another + Message are the same. + + Received messages may contain a 'delivery_info' attribute, + which isn't compared. + + """ + try: + return (super(Message, self).__eq__(other) and + self.body == other.body) + except AttributeError: + return NotImplemented diff --git a/thesisenv/lib/python3.6/site-packages/amqp/channel.py b/thesisenv/lib/python3.6/site-packages/amqp/channel.py new file mode 100644 index 0000000..ff6a4ae --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/amqp/channel.py @@ -0,0 +1,2550 @@ +"""AMQP Channels""" +# Copyright (C) 2007-2008 Barry Pederson +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 +from __future__ import absolute_import + +import logging + +from collections import defaultdict +from warnings import warn + +from .abstract_channel import AbstractChannel +from .exceptions import ( + ChannelError, ConsumerCancelled, NotConfirmed, error_for_code, +) +from .five import Queue +from .protocol import basic_return_t, queue_declare_ok_t +from .serialization import AMQPWriter + +__all__ = ['Channel'] + +AMQP_LOGGER = logging.getLogger('amqp') + +EXCHANGE_AUTODELETE_DEPRECATED = """\ +The auto_delete flag for exchanges has been deprecated and will be removed +from py-amqp v1.5.0.\ +""" + + +class VDeprecationWarning(DeprecationWarning): + pass + + +class Channel(AbstractChannel): + """Work with channels + + The channel class provides methods for a client to establish a + virtual connection - a channel - to a server and for both peers to + operate the virtual connection thereafter. + + GRAMMAR:: + + channel = open-channel *use-channel close-channel + open-channel = C:OPEN S:OPEN-OK + use-channel = C:FLOW S:FLOW-OK + / S:FLOW C:FLOW-OK + / functional-class + close-channel = C:CLOSE S:CLOSE-OK + / S:CLOSE C:CLOSE-OK + + """ + + def __init__(self, connection, channel_id=None, auto_decode=True): + """Create a channel bound to a connection and using the specified + numeric channel_id, and open on the server. + + The 'auto_decode' parameter (defaults to True), indicates + whether the library should attempt to decode the body + of Messages to a Unicode string if there's a 'content_encoding' + property for the message. If there's no 'content_encoding' + property, or the decode raises an Exception, the message body + is left as plain bytes. + + """ + if channel_id: + connection._claim_channel_id(channel_id) + else: + channel_id = connection._get_free_channel_id() + + AMQP_LOGGER.debug('using channel_id: %d', channel_id) + + super(Channel, self).__init__(connection, channel_id) + + self.is_open = False + self.active = True # Flow control + self.returned_messages = Queue() + self.callbacks = {} + self.cancel_callbacks = {} + self.auto_decode = auto_decode + self.events = defaultdict(set) + self.no_ack_consumers = set() + + # set first time basic_publish_confirm is called + # and publisher confirms are enabled for this channel. + self._confirm_selected = False + if self.connection.confirm_publish: + self.basic_publish = self.basic_publish_confirm + + self._x_open() + + def _do_close(self): + """Tear down this object, after we've agreed to close + with the server.""" + AMQP_LOGGER.debug('Closed channel #%d', self.channel_id) + self.is_open = False + channel_id, self.channel_id = self.channel_id, None + connection, self.connection = self.connection, None + if connection: + connection.channels.pop(channel_id, None) + connection._avail_channel_ids.append(channel_id) + self.callbacks.clear() + self.cancel_callbacks.clear() + self.events.clear() + self.no_ack_consumers.clear() + + def _do_revive(self): + self.is_open = False + self._x_open() + + def close(self, reply_code=0, reply_text='', method_sig=(0, 0)): + """Request a channel close + + This method indicates that the sender wants to close the + channel. This may be due to internal conditions (e.g. 
a forced + shut-down) or due to an error handling a specific method, i.e. + an exception. When a close is due to an exception, the sender + provides the class and method id of the method which caused + the exception. + + RULE: + + After sending this method any received method except + Channel.Close-OK MUST be discarded. + + RULE: + + The peer sending this method MAY use a counter or timeout + to detect failure of the other peer to respond correctly + with Channel.Close-OK.. + + PARAMETERS: + reply_code: short + + The reply code. The AMQ reply codes are defined in AMQ + RFC 011. + + reply_text: shortstr + + The localised reply text. This text can be logged as an + aid to resolving issues. + + class_id: short + + failing method class + + When the close is provoked by a method exception, this + is the class of the method. + + method_id: short + + failing method ID + + When the close is provoked by a method exception, this + is the ID of the method. + + """ + try: + if not self.is_open or self.connection is None: + return + + args = AMQPWriter() + args.write_short(reply_code) + args.write_shortstr(reply_text) + args.write_short(method_sig[0]) # class_id + args.write_short(method_sig[1]) # method_id + self._send_method((20, 40), args) + return self.wait(allowed_methods=[ + (20, 40), # Channel.close + (20, 41), # Channel.close_ok + ]) + finally: + self.connection = None + + def _close(self, args): + """Request a channel close + + This method indicates that the sender wants to close the + channel. This may be due to internal conditions (e.g. a forced + shut-down) or due to an error handling a specific method, i.e. + an exception. When a close is due to an exception, the sender + provides the class and method id of the method which caused + the exception. + + RULE: + + After sending this method any received method except + Channel.Close-OK MUST be discarded. + + RULE: + + The peer sending this method MAY use a counter or timeout + to detect failure of the other peer to respond correctly + with Channel.Close-OK.. + + PARAMETERS: + reply_code: short + + The reply code. The AMQ reply codes are defined in AMQ + RFC 011. + + reply_text: shortstr + + The localised reply text. This text can be logged as an + aid to resolving issues. + + class_id: short + + failing method class + + When the close is provoked by a method exception, this + is the class of the method. + + method_id: short + + failing method ID + + When the close is provoked by a method exception, this + is the ID of the method. + + """ + + reply_code = args.read_short() + reply_text = args.read_shortstr() + class_id = args.read_short() + method_id = args.read_short() + + self._send_method((20, 41)) + self._do_revive() + + raise error_for_code( + reply_code, reply_text, (class_id, method_id), ChannelError, + ) + + def _close_ok(self, args): + """Confirm a channel close + + This method confirms a Channel.Close method and tells the + recipient that it is safe to release resources for the channel + and close the socket. + + RULE: + + A peer that detects a socket closure without having + received a Channel.Close-Ok handshake method SHOULD log + the error. + + """ + self._do_close() + + def flow(self, active): + """Enable/disable flow from peer + + This method asks the peer to pause or restart the flow of + content data. This is a simple flow-control mechanism that a + peer can use to avoid oveflowing its queues or otherwise + finding itself receiving more messages than it can process. + Note that this method is not intended for window control. 
The + peer that receives a request to stop sending content should + finish sending the current content, if any, and then wait + until it receives a Flow restart method. + + RULE: + + When a new channel is opened, it is active. Some + applications assume that channels are inactive until + started. To emulate this behaviour a client MAY open the + channel, then pause it. + + RULE: + + When sending content data in multiple frames, a peer + SHOULD monitor the channel for incoming methods and + respond to a Channel.Flow as rapidly as possible. + + RULE: + + A peer MAY use the Channel.Flow method to throttle + incoming content data for internal reasons, for example, + when exchangeing data over a slower connection. + + RULE: + + The peer that requests a Channel.Flow method MAY + disconnect and/or ban a peer that does not respect the + request. + + PARAMETERS: + active: boolean + + start/stop content frames + + If True, the peer starts sending content frames. If + False, the peer stops sending content frames. + + """ + args = AMQPWriter() + args.write_bit(active) + self._send_method((20, 20), args) + return self.wait(allowed_methods=[ + (20, 21), # Channel.flow_ok + ]) + + def _flow(self, args): + """Enable/disable flow from peer + + This method asks the peer to pause or restart the flow of + content data. This is a simple flow-control mechanism that a + peer can use to avoid oveflowing its queues or otherwise + finding itself receiving more messages than it can process. + Note that this method is not intended for window control. The + peer that receives a request to stop sending content should + finish sending the current content, if any, and then wait + until it receives a Flow restart method. + + RULE: + + When a new channel is opened, it is active. Some + applications assume that channels are inactive until + started. To emulate this behaviour a client MAY open the + channel, then pause it. + + RULE: + + When sending content data in multiple frames, a peer + SHOULD monitor the channel for incoming methods and + respond to a Channel.Flow as rapidly as possible. + + RULE: + + A peer MAY use the Channel.Flow method to throttle + incoming content data for internal reasons, for example, + when exchangeing data over a slower connection. + + RULE: + + The peer that requests a Channel.Flow method MAY + disconnect and/or ban a peer that does not respect the + request. + + PARAMETERS: + active: boolean + + start/stop content frames + + If True, the peer starts sending content frames. If + False, the peer stops sending content frames. + + """ + self.active = args.read_bit() + self._x_flow_ok(self.active) + + def _x_flow_ok(self, active): + """Confirm a flow method + + Confirms to the peer that a flow command was received and + processed. + + PARAMETERS: + active: boolean + + current flow setting + + Confirms the setting of the processed flow method: + True means the peer will start sending or continue + to send content frames; False means it will not. + + """ + args = AMQPWriter() + args.write_bit(active) + self._send_method((20, 21), args) + + def _flow_ok(self, args): + """Confirm a flow method + + Confirms to the peer that a flow command was received and + processed. + + PARAMETERS: + active: boolean + + current flow setting + + Confirms the setting of the processed flow method: + True means the peer will start sending or continue + to send content frames; False means it will not. 
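As a rough illustration of the flow handshake documented above, the calls below pause and then resume content delivery on a channel (a sketch added for this write-up, not part of the patched files; it assumes a RabbitMQ broker on localhost with default credentials)::

    import amqp

    conn = amqp.Connection(host='localhost:5672', userid='guest',
                           password='guest', virtual_host='/')
    ch = conn.channel()

    # Ask the peer to stop sending content frames, catch up on
    # whatever is already in flight, then restart the flow.
    ch.flow(False)   # waits for Channel.flow_ok from the peer
    # ... process the backlog ...
    ch.flow(True)

    ch.close()
    conn.close()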
+ + """ + return args.read_bit() + + def _x_open(self): + """Open a channel for use + + This method opens a virtual connection (a channel). + + RULE: + + This method MUST NOT be called when the channel is already + open. + + PARAMETERS: + out_of_band: shortstr (DEPRECATED) + + out-of-band settings + + Configures out-of-band transfers on this channel. The + syntax and meaning of this field will be formally + defined at a later date. + + """ + if self.is_open: + return + + args = AMQPWriter() + args.write_shortstr('') # out_of_band: deprecated + self._send_method((20, 10), args) + return self.wait(allowed_methods=[ + (20, 11), # Channel.open_ok + ]) + + def _open_ok(self, args): + """Signal that the channel is ready + + This method signals to the client that the channel is ready + for use. + + """ + self.is_open = True + AMQP_LOGGER.debug('Channel open') + + ############# + # + # Exchange + # + # + # work with exchanges + # + # Exchanges match and distribute messages across queues. + # Exchanges can be configured in the server or created at runtime. + # + # GRAMMAR:: + # + # exchange = C:DECLARE S:DECLARE-OK + # / C:DELETE S:DELETE-OK + # + # RULE: + # + # The server MUST implement the direct and fanout exchange + # types, and predeclare the corresponding exchanges named + # amq.direct and amq.fanout in each virtual host. The server + # MUST also predeclare a direct exchange to act as the default + # exchange for content Publish methods and for default queue + # bindings. + # + # RULE: + # + # The server SHOULD implement the topic exchange type, and + # predeclare the corresponding exchange named amq.topic in + # each virtual host. + # + # RULE: + # + # The server MAY implement the system exchange type, and + # predeclare the corresponding exchanges named amq.system in + # each virtual host. If the client attempts to bind a queue to + # the system exchange, the server MUST raise a connection + # exception with reply code 507 (not allowed). + # + + def exchange_declare(self, exchange, type, passive=False, durable=False, + auto_delete=True, nowait=False, arguments=None): + """Declare exchange, create if needed + + This method creates an exchange if it does not already exist, + and if the exchange exists, verifies that it is of the correct + and expected class. + + RULE: + + The server SHOULD support a minimum of 16 exchanges per + virtual host and ideally, impose no limit except as + defined by available resources. + + PARAMETERS: + exchange: shortstr + + RULE: + + Exchange names starting with "amq." are reserved + for predeclared and standardised exchanges. If + the client attempts to create an exchange starting + with "amq.", the server MUST raise a channel + exception with reply code 403 (access refused). + + type: shortstr + + exchange type + + Each exchange belongs to one of a set of exchange + types implemented by the server. The exchange types + define the functionality of the exchange - i.e. how + messages are routed through it. It is not valid or + meaningful to attempt to change the type of an + existing exchange. + + RULE: + + If the exchange already exists with a different + type, the server MUST raise a connection exception + with a reply code 507 (not allowed). + + RULE: + + If the server does not support the requested + exchange type it MUST raise a connection exception + with a reply code 503 (command invalid). + + passive: boolean + + do not create exchange + + If set, the server will not create the exchange. 
The + client can use this to check whether an exchange + exists without modifying the server state. + + RULE: + + If set, and the exchange does not already exist, + the server MUST raise a channel exception with + reply code 404 (not found). + + durable: boolean + + request a durable exchange + + If set when creating a new exchange, the exchange will + be marked as durable. Durable exchanges remain active + when a server restarts. Non-durable exchanges + (transient exchanges) are purged if/when a server + restarts. + + RULE: + + The server MUST support both durable and transient + exchanges. + + RULE: + + The server MUST ignore the durable field if the + exchange already exists. + + auto_delete: boolean + + auto-delete when unused + + If set, the exchange is deleted when all queues have + finished using it. + + RULE: + + The server SHOULD allow for a reasonable delay + between the point when it determines that an + exchange is not being used (or no longer used), + and the point when it deletes the exchange. At + the least it must allow a client to create an + exchange and then bind a queue to it, with a small + but non-zero delay between these two actions. + + RULE: + + The server MUST ignore the auto-delete field if + the exchange already exists. + + nowait: boolean + + do not send a reply method + + If set, the server will not respond to the method. The + client should not wait for a reply method. If the + server could not complete the method it will raise a + channel or connection exception. + + arguments: table + + arguments for declaration + + A set of arguments for the declaration. The syntax and + semantics of these arguments depends on the server + implementation. This field is ignored if passive is + True. + + """ + arguments = {} if arguments is None else arguments + args = AMQPWriter() + args.write_short(0) + args.write_shortstr(exchange) + args.write_shortstr(type) + args.write_bit(passive) + args.write_bit(durable) + args.write_bit(auto_delete) + args.write_bit(False) # internal: deprecated + args.write_bit(nowait) + args.write_table(arguments) + self._send_method((40, 10), args) + + if auto_delete: + warn(VDeprecationWarning(EXCHANGE_AUTODELETE_DEPRECATED)) + + if not nowait: + return self.wait(allowed_methods=[ + (40, 11), # Channel.exchange_declare_ok + ]) + + def _exchange_declare_ok(self, args): + """Confirms an exchange declaration + + This method confirms a Declare method and confirms the name of + the exchange, essential for automatically-named exchanges. + + """ + pass + + def exchange_delete(self, exchange, if_unused=False, nowait=False): + """Delete an exchange + + This method deletes an exchange. When an exchange is deleted + all queue bindings on the exchange are cancelled. + + PARAMETERS: + exchange: shortstr + + RULE: + + The exchange MUST exist. Attempting to delete a + non-existing exchange causes a channel exception. + + if_unused: boolean + + delete only if unused + + If set, the server will only delete the exchange if it + has no queue bindings. If the exchange has queue + bindings the server does not delete it but raises a + channel exception instead. + + RULE: + + If set, the server SHOULD delete the exchange but + only if it has no queue bindings. + + RULE: + + If set, the server SHOULD raise a channel + exception if the exchange is in use. + + nowait: boolean + + do not send a reply method + + If set, the server will not respond to the method. The + client should not wait for a reply method. 
If the + server could not complete the method it will raise a + channel or connection exception. + + """ + args = AMQPWriter() + args.write_short(0) + args.write_shortstr(exchange) + args.write_bit(if_unused) + args.write_bit(nowait) + self._send_method((40, 20), args) + + if not nowait: + return self.wait(allowed_methods=[ + (40, 21), # Channel.exchange_delete_ok + ]) + + def _exchange_delete_ok(self, args): + """Confirm deletion of an exchange + + This method confirms the deletion of an exchange. + + """ + pass + + def exchange_bind(self, destination, source='', routing_key='', + nowait=False, arguments=None): + """This method binds an exchange to an exchange. + + RULE: + + A server MUST allow and ignore duplicate bindings - that + is, two or more bind methods for a specific exchanges, + with identical arguments - without treating these as an + error. + + RULE: + + A server MUST allow cycles of exchange bindings to be + created including allowing an exchange to be bound to + itself. + + RULE: + + A server MUST not deliver the same message more than once + to a destination exchange, even if the topology of + exchanges and bindings results in multiple (even infinite) + routes to that exchange. + + PARAMETERS: + reserved-1: short + + destination: shortstr + + Specifies the name of the destination exchange to + bind. + + RULE: + + A client MUST NOT be allowed to bind a non- + existent destination exchange. + + RULE: + + The server MUST accept a blank exchange name to + mean the default exchange. + + source: shortstr + + Specifies the name of the source exchange to bind. + + RULE: + + A client MUST NOT be allowed to bind a non- + existent source exchange. + + RULE: + + The server MUST accept a blank exchange name to + mean the default exchange. + + routing-key: shortstr + + Specifies the routing key for the binding. The routing + key is used for routing messages depending on the + exchange configuration. Not all exchanges use a + routing key - refer to the specific exchange + documentation. + + no-wait: bit + + arguments: table + + A set of arguments for the binding. The syntax and + semantics of these arguments depends on the exchange + class. + + """ + arguments = {} if arguments is None else arguments + args = AMQPWriter() + args.write_short(0) + args.write_shortstr(destination) + args.write_shortstr(source) + args.write_shortstr(routing_key) + args.write_bit(nowait) + args.write_table(arguments) + self._send_method((40, 30), args) + + if not nowait: + return self.wait(allowed_methods=[ + (40, 31), # Channel.exchange_bind_ok + ]) + + def exchange_unbind(self, destination, source='', routing_key='', + nowait=False, arguments=None): + """This method unbinds an exchange from an exchange. + + RULE: + + If a unbind fails, the server MUST raise a connection + exception. + + PARAMETERS: + reserved-1: short + + destination: shortstr + + Specifies the name of the destination exchange to + unbind. + + RULE: + + The client MUST NOT attempt to unbind an exchange + that does not exist from an exchange. + + RULE: + + The server MUST accept a blank exchange name to + mean the default exchange. + + source: shortstr + + Specifies the name of the source exchange to unbind. + + RULE: + + The client MUST NOT attempt to unbind an exchange + from an exchange that does not exist. + + RULE: + + The server MUST accept a blank exchange name to + mean the default exchange. + + routing-key: shortstr + + Specifies the routing key of the binding to unbind. 
+ + no-wait: bit + + arguments: table + + Specifies the arguments of the binding to unbind. + + """ + arguments = {} if arguments is None else arguments + args = AMQPWriter() + args.write_short(0) + args.write_shortstr(destination) + args.write_shortstr(source) + args.write_shortstr(routing_key) + args.write_bit(nowait) + args.write_table(arguments) + self._send_method((40, 40), args) + + if not nowait: + return self.wait(allowed_methods=[ + (40, 51), # Channel.exchange_unbind_ok + ]) + + def _exchange_bind_ok(self, args): + """Confirm bind successful + + This method confirms that the bind was successful. + + """ + pass + + def _exchange_unbind_ok(self, args): + """Confirm unbind successful + + This method confirms that the unbind was successful. + + """ + pass + + ############# + # + # Queue + # + # + # work with queues + # + # Queues store and forward messages. Queues can be configured in + # the server or created at runtime. Queues must be attached to at + # least one exchange in order to receive messages from publishers. + # + # GRAMMAR:: + # + # queue = C:DECLARE S:DECLARE-OK + # / C:BIND S:BIND-OK + # / C:PURGE S:PURGE-OK + # / C:DELETE S:DELETE-OK + # + # RULE: + # + # A server MUST allow any content class to be sent to any + # queue, in any mix, and queue and delivery these content + # classes independently. Note that all methods that fetch + # content off queues are specific to a given content class. + # + + def queue_bind(self, queue, exchange='', routing_key='', + nowait=False, arguments=None): + """Bind queue to an exchange + + This method binds a queue to an exchange. Until a queue is + bound it will not receive any messages. In a classic + messaging model, store-and-forward queues are bound to a dest + exchange and subscription queues are bound to a dest_wild + exchange. + + RULE: + + A server MUST allow ignore duplicate bindings - that is, + two or more bind methods for a specific queue, with + identical arguments - without treating these as an error. + + RULE: + + If a bind fails, the server MUST raise a connection + exception. + + RULE: + + The server MUST NOT allow a durable queue to bind to a + transient exchange. If the client attempts this the server + MUST raise a channel exception. + + RULE: + + Bindings for durable queues are automatically durable and + the server SHOULD restore such bindings after a server + restart. + + RULE: + + The server SHOULD support at least 4 bindings per queue, + and ideally, impose no limit except as defined by + available resources. + + PARAMETERS: + queue: shortstr + + Specifies the name of the queue to bind. If the queue + name is empty, refers to the current queue for the + channel, which is the last declared queue. + + RULE: + + If the client did not previously declare a queue, + and the queue name in this method is empty, the + server MUST raise a connection exception with + reply code 530 (not allowed). + + RULE: + + If the queue does not exist the server MUST raise + a channel exception with reply code 404 (not + found). + + exchange: shortstr + + The name of the exchange to bind to. + + RULE: + + If the exchange does not exist the server MUST + raise a channel exception with reply code 404 (not + found). + + routing_key: shortstr + + message routing key + + Specifies the routing key for the binding. The + routing key is used for routing messages depending on + the exchange configuration. Not all exchanges use a + routing key - refer to the specific exchange + documentation. 
If the routing key is empty and the + queue name is empty, the routing key will be the + current queue for the channel, which is the last + declared queue. + + nowait: boolean + + do not send a reply method + + If set, the server will not respond to the method. The + client should not wait for a reply method. If the + server could not complete the method it will raise a + channel or connection exception. + + arguments: table + + arguments for binding + + A set of arguments for the binding. The syntax and + semantics of these arguments depends on the exchange + class. + """ + arguments = {} if arguments is None else arguments + args = AMQPWriter() + args.write_short(0) + args.write_shortstr(queue) + args.write_shortstr(exchange) + args.write_shortstr(routing_key) + args.write_bit(nowait) + args.write_table(arguments) + self._send_method((50, 20), args) + + if not nowait: + return self.wait(allowed_methods=[ + (50, 21), # Channel.queue_bind_ok + ]) + + def _queue_bind_ok(self, args): + """Confirm bind successful + + This method confirms that the bind was successful. + + """ + pass + + def queue_unbind(self, queue, exchange, routing_key='', + nowait=False, arguments=None): + """Unbind a queue from an exchange + + This method unbinds a queue from an exchange. + + RULE: + + If a unbind fails, the server MUST raise a connection exception. + + PARAMETERS: + queue: shortstr + + Specifies the name of the queue to unbind. + + RULE: + + The client MUST either specify a queue name or have + previously declared a queue on the same channel + + RULE: + + The client MUST NOT attempt to unbind a queue that + does not exist. + + exchange: shortstr + + The name of the exchange to unbind from. + + RULE: + + The client MUST NOT attempt to unbind a queue from an + exchange that does not exist. + + RULE: + + The server MUST accept a blank exchange name to mean + the default exchange. + + routing_key: shortstr + + routing key of binding + + Specifies the routing key of the binding to unbind. + + arguments: table + + arguments of binding + + Specifies the arguments of the binding to unbind. + + """ + arguments = {} if arguments is None else arguments + args = AMQPWriter() + args.write_short(0) + args.write_shortstr(queue) + args.write_shortstr(exchange) + args.write_shortstr(routing_key) + args.write_table(arguments) + self._send_method((50, 50), args) + + if not nowait: + return self.wait(allowed_methods=[ + (50, 51), # Channel.queue_unbind_ok + ]) + + def _queue_unbind_ok(self, args): + """Confirm unbind successful + + This method confirms that the unbind was successful. + + """ + pass + + def queue_declare(self, queue='', passive=False, durable=False, + exclusive=False, auto_delete=True, nowait=False, + arguments=None): + """Declare queue, create if needed + + This method creates or checks a queue. When creating a new + queue the client can specify various properties that control + the durability of the queue and its contents, and the level of + sharing for the queue. + + RULE: + + The server MUST create a default binding for a newly- + created queue to the default exchange, which is an + exchange of type 'direct'. + + RULE: + + The server SHOULD support a minimum of 256 queues per + virtual host and ideally, impose no limit except as + defined by available resources. + + PARAMETERS: + queue: shortstr + + RULE: + + The queue name MAY be empty, in which case the + server MUST create a new queue with a unique + generated name and return this to the client in + the Declare-Ok method. 
+ + RULE: + + Queue names starting with "amq." are reserved for + predeclared and standardised server queues. If + the queue name starts with "amq." and the passive + option is False, the server MUST raise a connection + exception with reply code 403 (access refused). + + passive: boolean + + do not create queue + + If set, the server will not create the queue. The + client can use this to check whether a queue exists + without modifying the server state. + + RULE: + + If set, and the queue does not already exist, the + server MUST respond with a reply code 404 (not + found) and raise a channel exception. + + durable: boolean + + request a durable queue + + If set when creating a new queue, the queue will be + marked as durable. Durable queues remain active when + a server restarts. Non-durable queues (transient + queues) are purged if/when a server restarts. Note + that durable queues do not necessarily hold persistent + messages, although it does not make sense to send + persistent messages to a transient queue. + + RULE: + + The server MUST recreate the durable queue after a + restart. + + RULE: + + The server MUST support both durable and transient + queues. + + RULE: + + The server MUST ignore the durable field if the + queue already exists. + + exclusive: boolean + + request an exclusive queue + + Exclusive queues may only be consumed from by the + current connection. Setting the 'exclusive' flag + always implies 'auto-delete'. + + RULE: + + The server MUST support both exclusive (private) + and non-exclusive (shared) queues. + + RULE: + + The server MUST raise a channel exception if + 'exclusive' is specified and the queue already + exists and is owned by a different connection. + + auto_delete: boolean + + auto-delete queue when unused + + If set, the queue is deleted when all consumers have + finished using it. Last consumer can be cancelled + either explicitly or because its channel is closed. If + there was no consumer ever on the queue, it won't be + deleted. + + RULE: + + The server SHOULD allow for a reasonable delay + between the point when it determines that a queue + is not being used (or no longer used), and the + point when it deletes the queue. At the least it + must allow a client to create a queue and then + create a consumer to read from it, with a small + but non-zero delay between these two actions. The + server should equally allow for clients that may + be disconnected prematurely, and wish to re- + consume from the same queue without losing + messages. We would recommend a configurable + timeout, with a suitable default value being one + minute. + + RULE: + + The server MUST ignore the auto-delete field if + the queue already exists. + + nowait: boolean + + do not send a reply method + + If set, the server will not respond to the method. The + client should not wait for a reply method. If the + server could not complete the method it will raise a + channel or connection exception. + + arguments: table + + arguments for declaration + + A set of arguments for the declaration. The syntax and + semantics of these arguments depends on the server + implementation. This field is ignored if passive is + True. 
+ + Returns a tuple containing 3 items: + the name of the queue (essential for automatically-named queues) + message count + consumer count + + """ + arguments = {} if arguments is None else arguments + args = AMQPWriter() + args.write_short(0) + args.write_shortstr(queue) + args.write_bit(passive) + args.write_bit(durable) + args.write_bit(exclusive) + args.write_bit(auto_delete) + args.write_bit(nowait) + args.write_table(arguments) + self._send_method((50, 10), args) + + if not nowait: + return self.wait(allowed_methods=[ + (50, 11), # Channel.queue_declare_ok + ]) + + def _queue_declare_ok(self, args): + """Confirms a queue definition + + This method confirms a Declare method and confirms the name of + the queue, essential for automatically-named queues. + + PARAMETERS: + queue: shortstr + + Reports the name of the queue. If the server generated + a queue name, this field contains that name. + + message_count: long + + number of messages in queue + + Reports the number of messages in the queue, which + will be zero for newly-created queues. + + consumer_count: long + + number of consumers + + Reports the number of active consumers for the queue. + Note that consumers can suspend activity + (Channel.Flow) in which case they do not appear in + this count. + + """ + return queue_declare_ok_t( + args.read_shortstr(), + args.read_long(), + args.read_long(), + ) + + def queue_delete(self, queue='', + if_unused=False, if_empty=False, nowait=False): + """Delete a queue + + This method deletes a queue. When a queue is deleted any + pending messages are sent to a dead-letter queue if this is + defined in the server configuration, and all consumers on the + queue are cancelled. + + RULE: + + The server SHOULD use a dead-letter queue to hold messages + that were pending on a deleted queue, and MAY provide + facilities for a system administrator to move these + messages back to an active queue. + + PARAMETERS: + queue: shortstr + + Specifies the name of the queue to delete. If the + queue name is empty, refers to the current queue for + the channel, which is the last declared queue. + + RULE: + + If the client did not previously declare a queue, + and the queue name in this method is empty, the + server MUST raise a connection exception with + reply code 530 (not allowed). + + RULE: + + The queue must exist. Attempting to delete a non- + existing queue causes a channel exception. + + if_unused: boolean + + delete only if unused + + If set, the server will only delete the queue if it + has no consumers. If the queue has consumers the + server does does not delete it but raises a channel + exception instead. + + RULE: + + The server MUST respect the if-unused flag when + deleting a queue. + + if_empty: boolean + + delete only if empty + + If set, the server will only delete the queue if it + has no messages. If the queue is not empty the server + raises a channel exception. + + nowait: boolean + + do not send a reply method + + If set, the server will not respond to the method. The + client should not wait for a reply method. If the + server could not complete the method it will raise a + channel or connection exception. 
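Taken together, the exchange and queue methods above are typically combined as follows (an illustrative sketch only; the exchange and queue names are invented and the snippet is not part of the committed files)::

    import amqp

    conn = amqp.Connection(host='localhost:5672')
    ch = conn.channel()

    # Declarations are idempotent: create if missing, otherwise verify
    # that the existing entity has a compatible definition.
    ch.exchange_declare('app.events', 'direct', durable=True,
                        auto_delete=False)
    name, message_count, consumer_count = ch.queue_declare(
        'app.tasks', durable=True, auto_delete=False)
    ch.queue_bind(name, exchange='app.events', routing_key='tasks')

    ch.close()
    conn.close()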
+ + """ + args = AMQPWriter() + args.write_short(0) + args.write_shortstr(queue) + args.write_bit(if_unused) + args.write_bit(if_empty) + args.write_bit(nowait) + self._send_method((50, 40), args) + + if not nowait: + return self.wait(allowed_methods=[ + (50, 41), # Channel.queue_delete_ok + ]) + + def _queue_delete_ok(self, args): + """Confirm deletion of a queue + + This method confirms the deletion of a queue. + + PARAMETERS: + message_count: long + + number of messages purged + + Reports the number of messages purged. + + """ + return args.read_long() + + def queue_purge(self, queue='', nowait=False): + """Purge a queue + + This method removes all messages from a queue. It does not + cancel consumers. Purged messages are deleted without any + formal "undo" mechanism. + + RULE: + + A call to purge MUST result in an empty queue. + + RULE: + + On transacted channels the server MUST not purge messages + that have already been sent to a client but not yet + acknowledged. + + RULE: + + The server MAY implement a purge queue or log that allows + system administrators to recover accidentally-purged + messages. The server SHOULD NOT keep purged messages in + the same storage spaces as the live messages since the + volumes of purged messages may get very large. + + PARAMETERS: + queue: shortstr + + Specifies the name of the queue to purge. If the + queue name is empty, refers to the current queue for + the channel, which is the last declared queue. + + RULE: + + If the client did not previously declare a queue, + and the queue name in this method is empty, the + server MUST raise a connection exception with + reply code 530 (not allowed). + + RULE: + + The queue must exist. Attempting to purge a non- + existing queue causes a channel exception. + + nowait: boolean + + do not send a reply method + + If set, the server will not respond to the method. The + client should not wait for a reply method. If the + server could not complete the method it will raise a + channel or connection exception. + + if nowait is False, returns a message_count + + """ + args = AMQPWriter() + args.write_short(0) + args.write_shortstr(queue) + args.write_bit(nowait) + self._send_method((50, 30), args) + + if not nowait: + return self.wait(allowed_methods=[ + (50, 31), # Channel.queue_purge_ok + ]) + + def _queue_purge_ok(self, args): + """Confirms a queue purge + + This method confirms the purge of a queue. + + PARAMETERS: + message_count: long + + number of messages purged + + Reports the number of messages purged. + + """ + return args.read_long() + + ############# + # + # Basic + # + # + # work with basic content + # + # The Basic class provides methods that support an industry- + # standard messaging model. + # + # GRAMMAR:: + # + # basic = C:QOS S:QOS-OK + # / C:CONSUME S:CONSUME-OK + # / C:CANCEL S:CANCEL-OK + # / C:PUBLISH content + # / S:RETURN content + # / S:DELIVER content + # / C:GET ( S:GET-OK content / S:GET-EMPTY ) + # / C:ACK + # / C:REJECT + # + # RULE: + # + # The server SHOULD respect the persistent property of basic + # messages and SHOULD make a best-effort to hold persistent + # basic messages on a reliable storage mechanism. + # + # RULE: + # + # The server MUST NOT discard a persistent basic message in + # case of a queue overflow. The server MAY use the + # Channel.Flow method to slow or stop a basic message + # publisher when necessary. 
+ # + # RULE: + # + # The server MAY overflow non-persistent basic messages to + # persistent storage and MAY discard or dead-letter non- + # persistent basic messages on a priority basis if the queue + # size exceeds some configured limit. + # + # RULE: + # + # The server MUST implement at least 2 priority levels for + # basic messages, where priorities 0-4 and 5-9 are treated as + # two distinct levels. The server MAY implement up to 10 + # priority levels. + # + # RULE: + # + # The server MUST deliver messages of the same priority in + # order irrespective of their individual persistence. + # + # RULE: + # + # The server MUST support both automatic and explicit + # acknowledgements on Basic content. + # + + def basic_ack(self, delivery_tag, multiple=False): + """Acknowledge one or more messages + + This method acknowledges one or more messages delivered via + the Deliver or Get-Ok methods. The client can ask to confirm + a single message or a set of messages up to and including a + specific message. + + PARAMETERS: + delivery_tag: longlong + + server-assigned delivery tag + + The server-assigned and channel-specific delivery tag + + RULE: + + The delivery tag is valid only within the channel + from which the message was received. I.e. a client + MUST NOT receive a message on one channel and then + acknowledge it on another. + + RULE: + + The server MUST NOT use a zero value for delivery + tags. Zero is reserved for client use, meaning "all + messages so far received". + + multiple: boolean + + acknowledge multiple messages + + If set to True, the delivery tag is treated as "up to + and including", so that the client can acknowledge + multiple messages with a single method. If set to + False, the delivery tag refers to a single message. + If the multiple field is True, and the delivery tag + is zero, tells the server to acknowledge all + outstanding mesages. + + RULE: + + The server MUST validate that a non-zero delivery- + tag refers to an delivered message, and raise a + channel exception if this is not the case. + + """ + args = AMQPWriter() + args.write_longlong(delivery_tag) + args.write_bit(multiple) + self._send_method((60, 80), args) + + def basic_cancel(self, consumer_tag, nowait=False): + """End a queue consumer + + This method cancels a consumer. This does not affect already + delivered messages, but it does mean the server will not send + any more messages for that consumer. The client may receive + an abitrary number of messages in between sending the cancel + method and receiving the cancel-ok reply. + + RULE: + + If the queue no longer exists when the client sends a + cancel command, or the consumer has been cancelled for + other reasons, this command has no effect. + + PARAMETERS: + consumer_tag: shortstr + + consumer tag + + Identifier for the consumer, valid within the current + connection. + + RULE: + + The consumer tag is valid only within the channel + from which the consumer was created. I.e. a client + MUST NOT create a consumer in one channel and then + use it in another. + + nowait: boolean + + do not send a reply method + + If set, the server will not respond to the method. The + client should not wait for a reply method. If the + server could not complete the method it will raise a + channel or connection exception. 
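For example, fetching a single message and acknowledging it (together with everything delivered before it) looks roughly like this; the queue name and broker address are assumptions and the snippet is not part of the patch::

    import amqp

    conn = amqp.Connection(host='localhost:5672')
    ch = conn.channel()

    msg = ch.basic_get('app.tasks')          # returns a Message or None
    if msg is not None:
        # multiple=True acknowledges every delivery up to and
        # including this tag in a single round trip.
        ch.basic_ack(msg.delivery_info['delivery_tag'], multiple=True)

    ch.close()
    conn.close()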
+ + """ + if self.connection is not None: + self.no_ack_consumers.discard(consumer_tag) + args = AMQPWriter() + args.write_shortstr(consumer_tag) + args.write_bit(nowait) + self._send_method((60, 30), args) + return self.wait(allowed_methods=[ + (60, 31), # Channel.basic_cancel_ok + ]) + + def _basic_cancel_notify(self, args): + """Consumer cancelled by server. + + Most likely the queue was deleted. + + """ + consumer_tag = args.read_shortstr() + callback = self._on_cancel(consumer_tag) + if callback: + callback(consumer_tag) + else: + raise ConsumerCancelled(consumer_tag, (60, 30)) + + def _basic_cancel_ok(self, args): + """Confirm a cancelled consumer + + This method confirms that the cancellation was completed. + + PARAMETERS: + consumer_tag: shortstr + + consumer tag + + Identifier for the consumer, valid within the current + connection. + + RULE: + + The consumer tag is valid only within the channel + from which the consumer was created. I.e. a client + MUST NOT create a consumer in one channel and then + use it in another. + + """ + consumer_tag = args.read_shortstr() + self._on_cancel(consumer_tag) + + def _on_cancel(self, consumer_tag): + self.callbacks.pop(consumer_tag, None) + return self.cancel_callbacks.pop(consumer_tag, None) + + def basic_consume(self, queue='', consumer_tag='', no_local=False, + no_ack=False, exclusive=False, nowait=False, + callback=None, arguments=None, on_cancel=None): + """Start a queue consumer + + This method asks the server to start a "consumer", which is a + transient request for messages from a specific queue. + Consumers last as long as the channel they were created on, or + until the client cancels them. + + RULE: + + The server SHOULD support at least 16 consumers per queue, + unless the queue was declared as private, and ideally, + impose no limit except as defined by available resources. + + PARAMETERS: + queue: shortstr + + Specifies the name of the queue to consume from. If + the queue name is null, refers to the current queue + for the channel, which is the last declared queue. + + RULE: + + If the client did not previously declare a queue, + and the queue name in this method is empty, the + server MUST raise a connection exception with + reply code 530 (not allowed). + + consumer_tag: shortstr + + Specifies the identifier for the consumer. The + consumer tag is local to a connection, so two clients + can use the same consumer tags. If this field is empty + the server will generate a unique tag. + + RULE: + + The tag MUST NOT refer to an existing consumer. If + the client attempts to create two consumers with + the same non-empty tag the server MUST raise a + connection exception with reply code 530 (not + allowed). + + no_local: boolean + + do not deliver own messages + + If the no-local field is set the server will not send + messages to the client that published them. + + no_ack: boolean + + no acknowledgement needed + + If this field is set the server does not expect + acknowledgments for messages. That is, when a message + is delivered to the client the server automatically and + silently acknowledges it on behalf of the client. This + functionality increases performance but at the cost of + reliability. Messages can get lost if a client dies + before it can deliver them to the application. + + exclusive: boolean + + request exclusive access + + Request exclusive consumer access, meaning only this + consumer can access the queue. 
+ + RULE: + + If the server cannot grant exclusive access to the + queue when asked, - because there are other + consumers active - it MUST raise a channel + exception with return code 403 (access refused). + + nowait: boolean + + do not send a reply method + + If set, the server will not respond to the method. The + client should not wait for a reply method. If the + server could not complete the method it will raise a + channel or connection exception. + + callback: Python callable + + function/method called with each delivered message + + For each message delivered by the broker, the + callable will be called with a Message object + as the single argument. If no callable is specified, + messages are quietly discarded, no_ack should probably + be set to True in that case. + + """ + args = AMQPWriter() + args.write_short(0) + args.write_shortstr(queue) + args.write_shortstr(consumer_tag) + args.write_bit(no_local) + args.write_bit(no_ack) + args.write_bit(exclusive) + args.write_bit(nowait) + args.write_table(arguments or {}) + self._send_method((60, 20), args) + + if not nowait: + consumer_tag = self.wait(allowed_methods=[ + (60, 21), # Channel.basic_consume_ok + ]) + + self.callbacks[consumer_tag] = callback + + if on_cancel: + self.cancel_callbacks[consumer_tag] = on_cancel + if no_ack: + self.no_ack_consumers.add(consumer_tag) + + return consumer_tag + + def _basic_consume_ok(self, args): + """Confirm a new consumer + + The server provides the client with a consumer tag, which is + used by the client for methods called on the consumer at a + later stage. + + PARAMETERS: + consumer_tag: shortstr + + Holds the consumer tag specified by the client or + provided by the server. + + """ + return args.read_shortstr() + + def _basic_deliver(self, args, msg): + """Notify the client of a consumer message + + This method delivers a message to the client, via a consumer. + In the asynchronous message delivery model, the client starts + a consumer using the Consume method, then the server responds + with Deliver methods as and when messages arrive for that + consumer. + + RULE: + + The server SHOULD track the number of times a message has + been delivered to clients and when a message is + redelivered a certain number of times - e.g. 5 times - + without being acknowledged, the server SHOULD consider the + message to be unprocessable (possibly causing client + applications to abort), and move the message to a dead + letter queue. + + PARAMETERS: + consumer_tag: shortstr + + consumer tag + + Identifier for the consumer, valid within the current + connection. + + RULE: + + The consumer tag is valid only within the channel + from which the consumer was created. I.e. a client + MUST NOT create a consumer in one channel and then + use it in another. + + delivery_tag: longlong + + server-assigned delivery tag + + The server-assigned and channel-specific delivery tag + + RULE: + + The delivery tag is valid only within the channel + from which the message was received. I.e. a client + MUST NOT receive a message on one channel and then + acknowledge it on another. + + RULE: + + The server MUST NOT use a zero value for delivery + tags. Zero is reserved for client use, meaning "all + messages so far received". + + redelivered: boolean + + message is being redelivered + + This indicates that the message has been previously + delivered to this or another client. + + exchange: shortstr + + Specifies the name of the exchange that the message + was originally published to. 
+ + routing_key: shortstr + + Message routing key + + Specifies the routing key name specified when the + message was published. + + """ + consumer_tag = args.read_shortstr() + delivery_tag = args.read_longlong() + redelivered = args.read_bit() + exchange = args.read_shortstr() + routing_key = args.read_shortstr() + + msg.channel = self + msg.delivery_info = { + 'consumer_tag': consumer_tag, + 'delivery_tag': delivery_tag, + 'redelivered': redelivered, + 'exchange': exchange, + 'routing_key': routing_key, + } + + try: + fun = self.callbacks[consumer_tag] + except KeyError: + pass + else: + fun(msg) + + def basic_get(self, queue='', no_ack=False): + """Direct access to a queue + + This method provides a direct access to the messages in a + queue using a synchronous dialogue that is designed for + specific types of application where synchronous functionality + is more important than performance. + + PARAMETERS: + queue: shortstr + + Specifies the name of the queue to consume from. If + the queue name is null, refers to the current queue + for the channel, which is the last declared queue. + + RULE: + + If the client did not previously declare a queue, + and the queue name in this method is empty, the + server MUST raise a connection exception with + reply code 530 (not allowed). + + no_ack: boolean + + no acknowledgement needed + + If this field is set the server does not expect + acknowledgments for messages. That is, when a message + is delivered to the client the server automatically and + silently acknowledges it on behalf of the client. This + functionality increases performance but at the cost of + reliability. Messages can get lost if a client dies + before it can deliver them to the application. + + Non-blocking, returns a message object, or None. + + """ + args = AMQPWriter() + args.write_short(0) + args.write_shortstr(queue) + args.write_bit(no_ack) + self._send_method((60, 70), args) + return self.wait(allowed_methods=[ + (60, 71), # Channel.basic_get_ok + (60, 72), # Channel.basic_get_empty + ]) + + def _basic_get_empty(self, args): + """Indicate no messages available + + This method tells the client that the queue has no messages + available for the client. + + PARAMETERS: + cluster_id: shortstr + + Cluster id + + For use by cluster applications, should not be used by + client applications. + + """ + cluster_id = args.read_shortstr() # noqa + + def _basic_get_ok(self, args, msg): + """Provide client with a message + + This method delivers a message to the client following a get + method. A message delivered by 'get-ok' must be acknowledged + unless the no-ack option was set in the get method. + + PARAMETERS: + delivery_tag: longlong + + server-assigned delivery tag + + The server-assigned and channel-specific delivery tag + + RULE: + + The delivery tag is valid only within the channel + from which the message was received. I.e. a client + MUST NOT receive a message on one channel and then + acknowledge it on another. + + RULE: + + The server MUST NOT use a zero value for delivery + tags. Zero is reserved for client use, meaning "all + messages so far received". + + redelivered: boolean + + message is being redelivered + + This indicates that the message has been previously + delivered to this or another client. + + exchange: shortstr + + Specifies the name of the exchange that the message + was originally published to. If empty, the message + was published to the default exchange. 
+ + routing_key: shortstr + + Message routing key + + Specifies the routing key name specified when the + message was published. + + message_count: long + + number of messages pending + + This field reports the number of messages pending on + the queue, excluding the message being delivered. + Note that this figure is indicative, not reliable, and + can change arbitrarily as messages are added to the + queue and removed by other clients. + + """ + delivery_tag = args.read_longlong() + redelivered = args.read_bit() + exchange = args.read_shortstr() + routing_key = args.read_shortstr() + message_count = args.read_long() + + msg.channel = self + msg.delivery_info = { + 'delivery_tag': delivery_tag, + 'redelivered': redelivered, + 'exchange': exchange, + 'routing_key': routing_key, + 'message_count': message_count + } + return msg + + def _basic_publish(self, msg, exchange='', routing_key='', + mandatory=False, immediate=False): + """Publish a message + + This method publishes a message to a specific exchange. The + message will be routed to queues as defined by the exchange + configuration and distributed to any active consumers when the + transaction, if any, is committed. + + PARAMETERS: + exchange: shortstr + + Specifies the name of the exchange to publish to. The + exchange name can be empty, meaning the default + exchange. If the exchange name is specified, and that + exchange does not exist, the server will raise a + channel exception. + + RULE: + + The server MUST accept a blank exchange name to + mean the default exchange. + + RULE: + + The exchange MAY refuse basic content in which + case it MUST raise a channel exception with reply + code 540 (not implemented). + + routing_key: shortstr + + Message routing key + + Specifies the routing key for the message. The + routing key is used for routing messages depending on + the exchange configuration. + + mandatory: boolean + + indicate mandatory routing + + This flag tells the server how to react if the message + cannot be routed to a queue. If this flag is True, the + server will return an unroutable message with a Return + method. If this flag is False, the server silently + drops the message. + + RULE: + + The server SHOULD implement the mandatory flag. + + immediate: boolean + + request immediate delivery + + This flag tells the server how to react if the message + cannot be routed to a queue consumer immediately. If + this flag is set, the server will return an + undeliverable message with a Return method. If this + flag is zero, the server will queue the message, but + with no guarantee that it will ever be consumed. + + RULE: + + The server SHOULD implement the immediate flag. + + """ + args = AMQPWriter() + args.write_short(0) + args.write_shortstr(exchange) + args.write_shortstr(routing_key) + args.write_bit(mandatory) + args.write_bit(immediate) + + self._send_method((60, 40), args, msg) + basic_publish = _basic_publish + + def basic_publish_confirm(self, *args, **kwargs): + if not self._confirm_selected: + self._confirm_selected = True + self.confirm_select() + ret = self._basic_publish(*args, **kwargs) + # Basic.Ack / Basic.Nack + self.wait([(60, 80), (60, 120)]) + return ret + + def basic_qos(self, prefetch_size, prefetch_count, a_global): + """Specify quality of service + + This method requests a specific quality of service. The QoS + can be specified for the current channel or for all channels + on the connection. The particular properties and semantics of + a qos method always depend on the content class semantics. 
+ Though the qos method could in principle apply to both peers, + it is currently meaningful only for the server. + + PARAMETERS: + prefetch_size: long + + prefetch window in octets + + The client can request that messages be sent in + advance so that when the client finishes processing a + message, the following message is already held + locally, rather than needing to be sent down the + channel. Prefetching gives a performance improvement. + This field specifies the prefetch window size in + octets. The server will send a message in advance if + it is equal to or smaller in size than the available + prefetch size (and also falls into other prefetch + limits). May be set to zero, meaning "no specific + limit", although other prefetch limits may still + apply. The prefetch-size is ignored if the no-ack + option is set. + + RULE: + + The server MUST ignore this setting when the + client is not processing any messages - i.e. the + prefetch size does not limit the transfer of + single messages to a client, only the sending in + advance of more messages while the client still + has one or more unacknowledged messages. + + prefetch_count: short + + prefetch window in messages + + Specifies a prefetch window in terms of whole + messages. This field may be used in combination with + the prefetch-size field; a message will only be sent + in advance if both prefetch windows (and those at the + channel and connection level) allow it. The prefetch- + count is ignored if the no-ack option is set. + + RULE: + + The server MAY send less data in advance than + allowed by the client's specified prefetch windows + but it MUST NOT send more. + + a_global: boolean + + apply to entire connection + + By default the QoS settings apply to the current + channel only. If this field is set, they are applied + to the entire connection. + + """ + args = AMQPWriter() + args.write_long(prefetch_size) + args.write_short(prefetch_count) + args.write_bit(a_global) + self._send_method((60, 10), args) + return self.wait(allowed_methods=[ + (60, 11), # Channel.basic_qos_ok + ]) + + def _basic_qos_ok(self, args): + """Confirm the requested qos + + This method tells the client that the requested QoS levels + could be handled by the server. The requested QoS applies to + all active consumers until a new QoS is defined. + + """ + pass + + def basic_recover(self, requeue=False): + """Redeliver unacknowledged messages + + This method asks the broker to redeliver all unacknowledged + messages on a specified channel. Zero or more messages may be + redelivered. This method is only allowed on non-transacted + channels. + + RULE: + + The server MUST set the redelivered flag on all messages + that are resent. + + RULE: + + The server MUST raise a channel exception if this is + called on a transacted channel. + + PARAMETERS: + requeue: boolean + + requeue the message + + If this field is False, the message will be redelivered + to the original recipient. If this field is True, the + server will attempt to requeue the message, + potentially then delivering it to an alternative + subscriber. 
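As a concrete illustration of the prefetch semantics described above, a consumer-side sketch along these lines is typical (the broker address, credentials and the 'tasks' queue name are placeholders, not part of this library):

    from amqp.connection import Connection

    conn = Connection(host='localhost:5672', userid='guest', password='guest')
    channel = conn.channel()

    # prefetch_size=0 means no byte limit; prefetch_count=10 caps the number of
    # unacknowledged messages the server will push to this channel's consumers;
    # a_global=False restricts the setting to this channel only.
    channel.basic_qos(0, 10, False)

The call is normally issued before basic_consume so that the prefetch window applies from the first delivery.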
+ + """ + args = AMQPWriter() + args.write_bit(requeue) + self._send_method((60, 110), args) + + def basic_recover_async(self, requeue=False): + args = AMQPWriter() + args.write_bit(requeue) + self._send_method((60, 100), args) + + def _basic_recover_ok(self, args): + """In 0-9-1 the deprecated recover solicits a response.""" + pass + + def basic_reject(self, delivery_tag, requeue): + """Reject an incoming message + + This method allows a client to reject a message. It can be + used to interrupt and cancel large incoming messages, or + return untreatable messages to their original queue. + + RULE: + + The server SHOULD be capable of accepting and process the + Reject method while sending message content with a Deliver + or Get-Ok method. I.e. the server should read and process + incoming methods while sending output frames. To cancel a + partially-send content, the server sends a content body + frame of size 1 (i.e. with no data except the frame-end + octet). + + RULE: + + The server SHOULD interpret this method as meaning that + the client is unable to process the message at this time. + + RULE: + + A client MUST NOT use this method as a means of selecting + messages to process. A rejected message MAY be discarded + or dead-lettered, not necessarily passed to another + client. + + PARAMETERS: + delivery_tag: longlong + + server-assigned delivery tag + + The server-assigned and channel-specific delivery tag + + RULE: + + The delivery tag is valid only within the channel + from which the message was received. I.e. a client + MUST NOT receive a message on one channel and then + acknowledge it on another. + + RULE: + + The server MUST NOT use a zero value for delivery + tags. Zero is reserved for client use, meaning "all + messages so far received". + + requeue: boolean + + requeue the message + + If this field is False, the message will be discarded. + If this field is True, the server will attempt to + requeue the message. + + RULE: + + The server MUST NOT deliver the message to the + same client within the context of the current + channel. The recommended strategy is to attempt + to deliver the message to an alternative consumer, + and if that is not possible, to move the message + to a dead-letter queue. The server MAY use more + sophisticated tracking to hold the message on the + queue and redeliver it to the same client at a + later stage. + + """ + args = AMQPWriter() + args.write_longlong(delivery_tag) + args.write_bit(requeue) + self._send_method((60, 90), args) + + def _basic_return(self, args, msg): + """Return a failed message + + This method returns an undeliverable message that was + published with the "immediate" flag set, or an unroutable + message published with the "mandatory" flag set. The reply + code and text provide information about the reason that the + message was undeliverable. + + PARAMETERS: + reply_code: short + + The reply code. The AMQ reply codes are defined in AMQ + RFC 011. + + reply_text: shortstr + + The localised reply text. This text can be logged as an + aid to resolving issues. + + exchange: shortstr + + Specifies the name of the exchange that the message + was originally published to. + + routing_key: shortstr + + Message routing key + + Specifies the routing key name specified when the + message was published. 
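Putting basic_consume, the delivery callback and basic_reject together, a consumer loop might look roughly like the following sketch (the broker address, the 'tasks' queue and the handle() function are illustrative placeholders):

    import socket
    from amqp.connection import Connection

    def handle(body):
        """Placeholder for application-level processing; returns success."""
        return bool(body)

    def on_message(msg):
        tag = msg.delivery_info['delivery_tag']
        if handle(msg.body):
            msg.channel.basic_ack(tag)
        else:
            # Let the server discard or dead-letter the message instead of
            # redelivering it on this channel.
            msg.channel.basic_reject(tag, requeue=False)

    conn = Connection(host='localhost:5672', userid='guest', password='guest')
    channel = conn.channel()
    channel.basic_consume(queue='tasks', callback=on_message, no_ack=False)

    try:
        while True:
            try:
                conn.drain_events(timeout=1)   # deliveries invoke on_message()
            except socket.timeout:
                pass
    finally:
        conn.close()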
+ + """ + self.returned_messages.put(basic_return_t( + args.read_short(), + args.read_shortstr(), + args.read_shortstr(), + args.read_shortstr(), + msg, + )) + + ############# + # + # Tx + # + # + # work with standard transactions + # + # Standard transactions provide so-called "1.5 phase commit". We + # can ensure that work is never lost, but there is a chance of + # confirmations being lost, so that messages may be resent. + # Applications that use standard transactions must be able to + # detect and ignore duplicate messages. + # + # GRAMMAR:: + # + # tx = C:SELECT S:SELECT-OK + # / C:COMMIT S:COMMIT-OK + # / C:ROLLBACK S:ROLLBACK-OK + # + # RULE: + # + # An client using standard transactions SHOULD be able to + # track all messages received within a reasonable period, and + # thus detect and reject duplicates of the same message. It + # SHOULD NOT pass these to the application layer. + # + # + + def tx_commit(self): + """Commit the current transaction + + This method commits all messages published and acknowledged in + the current transaction. A new transaction starts immediately + after a commit. + + """ + self._send_method((90, 20)) + return self.wait(allowed_methods=[ + (90, 21), # Channel.tx_commit_ok + ]) + + def _tx_commit_ok(self, args): + """Confirm a successful commit + + This method confirms to the client that the commit succeeded. + Note that if a commit fails, the server raises a channel + exception. + + """ + pass + + def tx_rollback(self): + """Abandon the current transaction + + This method abandons all messages published and acknowledged + in the current transaction. A new transaction starts + immediately after a rollback. + + """ + self._send_method((90, 30)) + return self.wait(allowed_methods=[ + (90, 31), # Channel.tx_rollback_ok + ]) + + def _tx_rollback_ok(self, args): + """Confirm a successful rollback + + This method confirms to the client that the rollback + succeeded. Note that if an rollback fails, the server raises a + channel exception. + + """ + pass + + def tx_select(self): + """Select standard transaction mode + + This method sets the channel to use standard transactions. + The client must use this method at least once on a channel + before using the Commit or Rollback methods. + + """ + self._send_method((90, 10)) + return self.wait(allowed_methods=[ + (90, 11), # Channel.tx_select_ok + ]) + + def _tx_select_ok(self, args): + """Confirm transaction mode + + This method confirms to the client that the channel was + successfully set to use standard transactions. + + """ + pass + + def confirm_select(self, nowait=False): + """Enables publisher confirms for this channel (an RabbitMQ + extension). + + Can now be used if the channel is in transactional mode. + + :param nowait: + If set, the server will not respond to the method. + The client should not wait for a reply method. If the + server could not complete the method it will raise a channel + or connection exception. 
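The transactional grammar above translates into a publisher sketch like this (broker address, routing key and payloads are placeholders; the nameless default exchange routes each message to the queue named by the routing key):

    from amqp.connection import Connection
    from amqp.basic_message import Message

    conn = Connection(host='localhost:5672', userid='guest', password='guest')
    channel = conn.channel()

    channel.tx_select()                      # switch the channel to transaction mode
    try:
        for i in range(3):
            msg = Message('payload %d' % i, content_type='text/plain')
            channel.basic_publish(msg, exchange='', routing_key='tasks')
        channel.tx_commit()                  # make the published messages effective
    except Exception:
        channel.tx_rollback()                # abandon everything published so far
        raise
    finally:
        conn.close()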
+ + """ + args = AMQPWriter() + args.write_bit(nowait) + + self._send_method((85, 10), args) + if not nowait: + self.wait(allowed_methods=[ + (85, 11), # Confirm.select_ok + ]) + + def _confirm_select_ok(self, args): + """With this method the broker confirms to the client that + the channel is now using publisher confirms.""" + pass + + def _basic_ack_recv(self, args): + delivery_tag = args.read_longlong() + multiple = args.read_bit() + self._apply_callbacks('basic_ack', delivery_tag, multiple) + + def _apply_callbacks(self, event, *args): + return [callback(*args) for callback in self.events[event]] + + def _basic_nack(self, args): + delivery_tag = args.read_longlong() + multiple = args.read_bit() + requeue = args.read_bit() + if not self._apply_callbacks( + 'basic_nack', delivery_tag, multiple, requeue): + raise NotConfirmed(delivery_tag, (60, 120), 'basic.nack') + + _METHOD_MAP = { + (20, 11): _open_ok, + (20, 20): _flow, + (20, 21): _flow_ok, + (20, 40): _close, + (20, 41): _close_ok, + (40, 11): _exchange_declare_ok, + (40, 21): _exchange_delete_ok, + (40, 31): _exchange_bind_ok, + (40, 51): _exchange_unbind_ok, + (50, 11): _queue_declare_ok, + (50, 21): _queue_bind_ok, + (50, 31): _queue_purge_ok, + (50, 41): _queue_delete_ok, + (50, 51): _queue_unbind_ok, + (60, 11): _basic_qos_ok, + (60, 21): _basic_consume_ok, + (60, 30): _basic_cancel_notify, + (60, 31): _basic_cancel_ok, + (60, 50): _basic_return, + (60, 60): _basic_deliver, + (60, 71): _basic_get_ok, + (60, 72): _basic_get_empty, + (60, 80): _basic_ack_recv, + (60, 120): _basic_nack, + (60, 111): _basic_recover_ok, + (85, 11): _confirm_select_ok, + (90, 11): _tx_select_ok, + (90, 21): _tx_commit_ok, + (90, 31): _tx_rollback_ok, + } + + _IMMEDIATE_METHODS = [ + (60, 50), # basic_return + ] diff --git a/thesisenv/lib/python3.6/site-packages/amqp/connection.py b/thesisenv/lib/python3.6/site-packages/amqp/connection.py new file mode 100644 index 0000000..6988f85 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/amqp/connection.py @@ -0,0 +1,1008 @@ +"""AMQP Connections""" +# Copyright (C) 2007-2008 Barry Pederson +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 +from __future__ import absolute_import + +import logging +import socket + +from array import array +try: + from ssl import SSLError +except ImportError: + class SSLError(Exception): # noqa + pass + +from . 
import __version__ +from .abstract_channel import AbstractChannel +from .channel import Channel +from .exceptions import ( + AMQPNotImplementedError, ChannelError, ResourceError, + ConnectionForced, ConnectionError, error_for_code, + RecoverableConnectionError, RecoverableChannelError, +) +from .five import items, range, values, monotonic +from .method_framing import MethodReader, MethodWriter +from .serialization import AMQPWriter +from .transport import create_transport + +HAS_MSG_PEEK = hasattr(socket, 'MSG_PEEK') + +START_DEBUG_FMT = """ +Start from server, version: %d.%d, properties: %s, mechanisms: %s, locales: %s +""".strip() + +__all__ = ['Connection'] + +# +# Client property info that gets sent to the server on connection startup +# +LIBRARY_PROPERTIES = { + 'product': 'py-amqp', + 'product_version': __version__, + 'capabilities': {}, +} + +AMQP_LOGGER = logging.getLogger('amqp') + + +class Connection(AbstractChannel): + """The connection class provides methods for a client to establish a + network connection to a server, and for both peers to operate the + connection thereafter. + + GRAMMAR:: + + connection = open-connection *use-connection close-connection + open-connection = C:protocol-header + S:START C:START-OK + *challenge + S:TUNE C:TUNE-OK + C:OPEN S:OPEN-OK + challenge = S:SECURE C:SECURE-OK + use-connection = *channel + close-connection = C:CLOSE S:CLOSE-OK + / S:CLOSE C:CLOSE-OK + + """ + Channel = Channel + + #: Final heartbeat interval value (in float seconds) after negotiation + heartbeat = None + + #: Original heartbeat interval value proposed by client. + client_heartbeat = None + + #: Original heartbeat interval proposed by server. + server_heartbeat = None + + #: Time of last heartbeat sent (in monotonic time, if available). + last_heartbeat_sent = 0 + + #: Time of last heartbeat received (in monotonic time, if available). + last_heartbeat_received = 0 + + #: Number of bytes sent to socket at the last heartbeat check. + prev_sent = None + + #: Number of bytes received from socket at the last heartbeat check. + prev_recv = None + + def __init__(self, host='localhost', userid='guest', password='guest', + login_method='AMQPLAIN', login_response=None, + virtual_host='/', locale='en_US', client_properties=None, + ssl=False, connect_timeout=None, channel_max=None, + frame_max=None, heartbeat=0, on_blocked=None, + on_unblocked=None, confirm_publish=False, **kwargs): + """Create a connection to the specified host, which should be + a 'host[:port]', such as 'localhost', or '1.2.3.4:5672' + (defaults to 'localhost', if a port is not specified then + 5672 is used) + + If login_response is not specified, one is built up for you from + userid and password if they are present. + + The 'ssl' parameter may be simply True/False, or for Python >= 2.6 + a dictionary of options to pass to ssl.wrap_socket() such as + requiring certain certificates. 
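In client code the constructor is typically used along these lines (host, credentials and virtual host are placeholders; the AMQP handshake of Start/Start-OK, Tune/Tune-OK and Open/Open-OK runs inside __init__, as the implementation below shows):

    from amqp.connection import Connection

    # Open a connection and negotiate channel_max, frame_max and heartbeat.
    conn = Connection(host='localhost:5672', userid='guest', password='guest',
                      virtual_host='/', heartbeat=30)

    channel = conn.channel()   # allocate a channel; channel 0 is the connection itself
    # ... declare queues, publish, consume ...
    channel.close()
    conn.close()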
+ + """ + channel_max = channel_max or 65535 + frame_max = frame_max or 131072 + if (login_response is None) \ + and (userid is not None) \ + and (password is not None): + login_response = AMQPWriter() + login_response.write_table({'LOGIN': userid, 'PASSWORD': password}) + # Skip the length at the beginning + login_response = login_response.getvalue()[4:] + + d = dict(LIBRARY_PROPERTIES, **client_properties or {}) + self._method_override = {(60, 50): self._dispatch_basic_return} + + self.channels = {} + # The connection object itself is treated as channel 0 + super(Connection, self).__init__(self, 0) + + self.transport = None + + # Properties set in the Tune method + self.channel_max = channel_max + self.frame_max = frame_max + self.client_heartbeat = heartbeat + + self.confirm_publish = confirm_publish + + # Callbacks + self.on_blocked = on_blocked + self.on_unblocked = on_unblocked + + self._avail_channel_ids = array('H', range(self.channel_max, 0, -1)) + + # Properties set in the Start method + self.version_major = 0 + self.version_minor = 0 + self.server_properties = {} + self.mechanisms = [] + self.locales = [] + + # Let the transport.py module setup the actual + # socket connection to the broker. + # + self.transport = self.Transport(host, connect_timeout, ssl) + + self.method_reader = MethodReader(self.transport) + self.method_writer = MethodWriter(self.transport, self.frame_max) + + self.wait(allowed_methods=[ + (10, 10), # start + ]) + + self._x_start_ok(d, login_method, login_response, locale) + + self._wait_tune_ok = True + while self._wait_tune_ok: + self.wait(allowed_methods=[ + (10, 20), # secure + (10, 30), # tune + ]) + + return self._x_open(virtual_host) + + def Transport(self, host, connect_timeout, ssl=False): + return create_transport(host, connect_timeout, ssl) + + @property + def connected(self): + return self.transport and self.transport.connected + + def _do_close(self): + try: + self.transport.close() + + temp_list = [x for x in values(self.channels) if x is not self] + for ch in temp_list: + ch._do_close() + except socket.error: + pass # connection already closed on the other end + finally: + self.transport = self.connection = self.channels = None + + def _get_free_channel_id(self): + try: + return self._avail_channel_ids.pop() + except IndexError: + raise ResourceError( + 'No free channel ids, current={0}, channel_max={1}'.format( + len(self.channels), self.channel_max), (20, 10)) + + def _claim_channel_id(self, channel_id): + try: + return self._avail_channel_ids.remove(channel_id) + except ValueError: + raise ConnectionError( + 'Channel %r already open' % (channel_id, )) + + def _wait_method(self, channel_id, allowed_methods, timeout=None): + """Wait for a method from the server destined for + a particular channel.""" + # + # Check the channel's deferred methods + # + method_queue = self.channels[channel_id].method_queue + + for queued_method in method_queue: + method_sig = queued_method[0] + if (allowed_methods is None) \ + or (method_sig in allowed_methods) \ + or (method_sig == (20, 40)): + method_queue.remove(queued_method) + return queued_method + + # + # Nothing queued, need to wait for a method from the peer + # + read_timeout = self.read_timeout + wait = self.wait + while 1: + channel, method_sig, args, content = read_timeout(timeout) + + if channel == channel_id and ( + allowed_methods is None or + method_sig in allowed_methods or + method_sig == (20, 40)): + return method_sig, args, content + + # + # Certain methods like basic_return should be 
dispatched + # immediately rather than being queued, even if they're not + # one of the 'allowed_methods' we're looking for. + # + if channel and method_sig in self.Channel._IMMEDIATE_METHODS: + self.channels[channel].dispatch_method( + method_sig, args, content, + ) + continue + + # + # Not the channel and/or method we were looking for. Queue + # this method for later + # + self.channels[channel].method_queue.append( + (method_sig, args, content), + ) + + # + # If we just queued up a method for channel 0 (the Connection + # itself) it's probably a close method in reaction to some + # error, so deal with it right away. + # + if not channel: + wait() + + def channel(self, channel_id=None): + """Fetch a Channel object identified by the numeric channel_id, or + create that object if it doesn't already exist.""" + try: + return self.channels[channel_id] + except KeyError: + return self.Channel(self, channel_id) + + def is_alive(self): + if HAS_MSG_PEEK: + sock = self.sock + prev = sock.gettimeout() + sock.settimeout(0.0001) + try: + sock.recv(1, socket.MSG_PEEK) + except socket.timeout: + pass + except socket.error: + return False + finally: + sock.settimeout(prev) + return True + + def drain_events(self, timeout=None): + """Wait for an event on a channel.""" + chanmap = self.channels + chanid, method_sig, args, content = self._wait_multiple( + chanmap, None, timeout=timeout, + ) + + channel = chanmap[chanid] + + if (content and + channel.auto_decode and + hasattr(content, 'content_encoding')): + try: + content.body = content.body.decode(content.content_encoding) + except Exception: + pass + + amqp_method = (self._method_override.get(method_sig) or + channel._METHOD_MAP.get(method_sig, None)) + + if amqp_method is None: + raise AMQPNotImplementedError( + 'Unknown AMQP method {0!r}'.format(method_sig)) + + if content is None: + return amqp_method(channel, args) + else: + return amqp_method(channel, args, content) + + def read_timeout(self, timeout=None): + if timeout is None: + return self.method_reader.read_method() + sock = self.sock + prev = sock.gettimeout() + if prev != timeout: + sock.settimeout(timeout) + try: + try: + return self.method_reader.read_method() + except SSLError as exc: + # http://bugs.python.org/issue10272 + if 'timed out' in str(exc): + raise socket.timeout() + # Non-blocking SSL sockets can throw SSLError + if 'The operation did not complete' in str(exc): + raise socket.timeout() + raise + finally: + if prev != timeout: + sock.settimeout(prev) + + def _wait_multiple(self, channels, allowed_methods, timeout=None): + for channel_id, channel in items(channels): + method_queue = channel.method_queue + for queued_method in method_queue: + method_sig = queued_method[0] + if (allowed_methods is None or + method_sig in allowed_methods or + method_sig == (20, 40)): + method_queue.remove(queued_method) + method_sig, args, content = queued_method + return channel_id, method_sig, args, content + + # Nothing queued, need to wait for a method from the peer + read_timeout = self.read_timeout + wait = self.wait + while 1: + channel, method_sig, args, content = read_timeout(timeout) + + if channel in channels and ( + allowed_methods is None or + method_sig in allowed_methods or + method_sig == (20, 40)): + return channel, method_sig, args, content + + # Not the channel and/or method we were looking for. 
Queue + # this method for later + channels[channel].method_queue.append((method_sig, args, content)) + + # + # If we just queued up a method for channel 0 (the Connection + # itself) it's probably a close method in reaction to some + # error, so deal with it right away. + # + if channel == 0: + wait() + + def _dispatch_basic_return(self, channel, args, msg): + reply_code = args.read_short() + reply_text = args.read_shortstr() + exchange = args.read_shortstr() + routing_key = args.read_shortstr() + + exc = error_for_code(reply_code, reply_text, (50, 60), ChannelError) + handlers = channel.events.get('basic_return') + if not handlers: + raise exc + for callback in handlers: + callback(exc, exchange, routing_key, msg) + + def close(self, reply_code=0, reply_text='', method_sig=(0, 0)): + """Request a connection close + + This method indicates that the sender wants to close the + connection. This may be due to internal conditions (e.g. a + forced shut-down) or due to an error handling a specific + method, i.e. an exception. When a close is due to an + exception, the sender provides the class and method id of the + method which caused the exception. + + RULE: + + After sending this method any received method except the + Close-OK method MUST be discarded. + + RULE: + + The peer sending this method MAY use a counter or timeout + to detect failure of the other peer to respond correctly + with the Close-OK method. + + RULE: + + When a server receives the Close method from a client it + MUST delete all server-side resources associated with the + client's context. A client CANNOT reconnect to a context + after sending or receiving a Close method. + + PARAMETERS: + reply_code: short + + The reply code. The AMQ reply codes are defined in AMQ + RFC 011. + + reply_text: shortstr + + The localised reply text. This text can be logged as an + aid to resolving issues. + + class_id: short + + failing method class + + When the close is provoked by a method exception, this + is the class of the method. + + method_id: short + + failing method ID + + When the close is provoked by a method exception, this + is the ID of the method. + + """ + if self.transport is None: + # already closed + return + + args = AMQPWriter() + args.write_short(reply_code) + args.write_shortstr(reply_text) + args.write_short(method_sig[0]) # class_id + args.write_short(method_sig[1]) # method_id + self._send_method((10, 50), args) + return self.wait(allowed_methods=[ + (10, 50), # Connection.close + (10, 51), # Connection.close_ok + ]) + + def _close(self, args): + """Request a connection close + + This method indicates that the sender wants to close the + connection. This may be due to internal conditions (e.g. a + forced shut-down) or due to an error handling a specific + method, i.e. an exception. When a close is due to an + exception, the sender provides the class and method id of the + method which caused the exception. + + RULE: + + After sending this method any received method except the + Close-OK method MUST be discarded. + + RULE: + + The peer sending this method MAY use a counter or timeout + to detect failure of the other peer to respond correctly + with the Close-OK method. + + RULE: + + When a server receives the Close method from a client it + MUST delete all server-side resources associated with the + client's context. A client CANNOT reconnect to a context + after sending or receiving a Close method. + + PARAMETERS: + reply_code: short + + The reply code. The AMQ reply codes are defined in AMQ + RFC 011. 
+ + reply_text: shortstr + + The localised reply text. This text can be logged as an + aid to resolving issues. + + class_id: short + + failing method class + + When the close is provoked by a method exception, this + is the class of the method. + + method_id: short + + failing method ID + + When the close is provoked by a method exception, this + is the ID of the method. + + """ + reply_code = args.read_short() + reply_text = args.read_shortstr() + class_id = args.read_short() + method_id = args.read_short() + + self._x_close_ok() + + raise error_for_code(reply_code, reply_text, + (class_id, method_id), ConnectionError) + + def _blocked(self, args): + """RabbitMQ Extension.""" + reason = args.read_shortstr() + if self.on_blocked: + return self.on_blocked(reason) + + def _unblocked(self, *args): + if self.on_unblocked: + return self.on_unblocked() + + def _x_close_ok(self): + """Confirm a connection close + + This method confirms a Connection.Close method and tells the + recipient that it is safe to release resources for the + connection and close the socket. + + RULE: + + A peer that detects a socket closure without having + received a Close-Ok handshake method SHOULD log the error. + + """ + self._send_method((10, 51)) + self._do_close() + + def _close_ok(self, args): + """Confirm a connection close + + This method confirms a Connection.Close method and tells the + recipient that it is safe to release resources for the + connection and close the socket. + + RULE: + + A peer that detects a socket closure without having + received a Close-Ok handshake method SHOULD log the error. + + """ + self._do_close() + + def _x_open(self, virtual_host, capabilities=''): + """Open connection to virtual host + + This method opens a connection to a virtual host, which is a + collection of resources, and acts to separate multiple + application domains within a server. + + RULE: + + The client MUST open the context before doing any work on + the connection. + + PARAMETERS: + virtual_host: shortstr + + virtual host name + + The name of the virtual host to work with. + + RULE: + + If the server supports multiple virtual hosts, it + MUST enforce a full separation of exchanges, + queues, and all associated entities per virtual + host. An application, connected to a specific + virtual host, MUST NOT be able to access resources + of another virtual host. + + RULE: + + The server SHOULD verify that the client has + permission to access the specified virtual host. + + RULE: + + The server MAY configure arbitrary limits per + virtual host, such as the number of each type of + entity that may be used, per connection and/or in + total. + + capabilities: shortstr + + required capabilities + + The client may specify a number of capability names, + delimited by spaces. The server can use this string + to how to process the client's connection request. + + """ + args = AMQPWriter() + args.write_shortstr(virtual_host) + args.write_shortstr(capabilities) + args.write_bit(False) + self._send_method((10, 40), args) + return self.wait(allowed_methods=[ + (10, 41), # Connection.open_ok + ]) + + def _open_ok(self, args): + """Signal that the connection is ready + + This method signals to the client that the connection is ready + for use. 
+ + PARAMETERS: + known_hosts: shortstr (deprecated) + + """ + AMQP_LOGGER.debug('Open OK!') + + def _secure(self, args): + """Security mechanism challenge + + The SASL protocol works by exchanging challenges and responses + until both peers have received sufficient information to + authenticate each other. This method challenges the client to + provide more information. + + PARAMETERS: + challenge: longstr + + security challenge data + + Challenge information, a block of opaque binary data + passed to the security mechanism. + + """ + challenge = args.read_longstr() # noqa + + def _x_secure_ok(self, response): + """Security mechanism response + + This method attempts to authenticate, passing a block of SASL + data for the security mechanism at the server side. + + PARAMETERS: + response: longstr + + security response data + + A block of opaque data passed to the security + mechanism. The contents of this data are defined by + the SASL security mechanism. + + """ + args = AMQPWriter() + args.write_longstr(response) + self._send_method((10, 21), args) + + def _start(self, args): + """Start connection negotiation + + This method starts the connection negotiation process by + telling the client the protocol version that the server + proposes, along with a list of security mechanisms which the + client can use for authentication. + + RULE: + + If the client cannot handle the protocol version suggested + by the server it MUST close the socket connection. + + RULE: + + The server MUST provide a protocol version that is lower + than or equal to that requested by the client in the + protocol header. If the server cannot support the + specified protocol it MUST NOT send this method, but MUST + close the socket connection. + + PARAMETERS: + version_major: octet + + protocol major version + + The protocol major version that the server agrees to + use, which cannot be higher than the client's major + version. + + version_minor: octet + + protocol major version + + The protocol minor version that the server agrees to + use, which cannot be higher than the client's minor + version. + + server_properties: table + + server properties + + mechanisms: longstr + + available security mechanisms + + A list of the security mechanisms that the server + supports, delimited by spaces. Currently ASL supports + these mechanisms: PLAIN. + + locales: longstr + + available message locales + + A list of the message locales that the server + supports, delimited by spaces. The locale defines the + language in which the server will send reply texts. + + RULE: + + All servers MUST support at least the en_US + locale. + + """ + self.version_major = args.read_octet() + self.version_minor = args.read_octet() + self.server_properties = args.read_table() + self.mechanisms = args.read_longstr().split(' ') + self.locales = args.read_longstr().split(' ') + + AMQP_LOGGER.debug( + START_DEBUG_FMT, + self.version_major, self.version_minor, + self.server_properties, self.mechanisms, self.locales, + ) + + def _x_start_ok(self, client_properties, mechanism, response, locale): + """Select security mechanism and locale + + This method selects a SASL security mechanism. ASL uses SASL + (RFC2222) to negotiate authentication and encryption. + + PARAMETERS: + client_properties: table + + client properties + + mechanism: shortstr + + selected security mechanism + + A single security mechanisms selected by the client, + which must be one of those specified by the server. 
+ + RULE: + + The client SHOULD authenticate using the highest- + level security profile it can handle from the list + provided by the server. + + RULE: + + The mechanism field MUST contain one of the + security mechanisms proposed by the server in the + Start method. If it doesn't, the server MUST close + the socket. + + response: longstr + + security response data + + A block of opaque data passed to the security + mechanism. The contents of this data are defined by + the SASL security mechanism. For the PLAIN security + mechanism this is defined as a field table holding two + fields, LOGIN and PASSWORD. + + locale: shortstr + + selected message locale + + A single message local selected by the client, which + must be one of those specified by the server. + + """ + if self.server_capabilities.get('consumer_cancel_notify'): + if 'capabilities' not in client_properties: + client_properties['capabilities'] = {} + client_properties['capabilities']['consumer_cancel_notify'] = True + if self.server_capabilities.get('connection.blocked'): + if 'capabilities' not in client_properties: + client_properties['capabilities'] = {} + client_properties['capabilities']['connection.blocked'] = True + args = AMQPWriter() + args.write_table(client_properties) + args.write_shortstr(mechanism) + args.write_longstr(response) + args.write_shortstr(locale) + self._send_method((10, 11), args) + + def _tune(self, args): + """Propose connection tuning parameters + + This method proposes a set of connection configuration values + to the client. The client can accept and/or adjust these. + + PARAMETERS: + channel_max: short + + proposed maximum channels + + The maximum total number of channels that the server + allows per connection. Zero means that the server does + not impose a fixed limit, but the number of allowed + channels may be limited by available server resources. + + frame_max: long + + proposed maximum frame size + + The largest frame size that the server proposes for + the connection. The client can negotiate a lower + value. Zero means that the server does not impose any + specific limit but may reject very large frames if it + cannot allocate resources for them. + + RULE: + + Until the frame-max has been negotiated, both + peers MUST accept frames of up to 4096 octets + large. The minimum non-zero value for the frame- + max field is 4096. + + heartbeat: short + + desired heartbeat delay + + The delay, in seconds, of the connection heartbeat + that the server wants. Zero means the server does not + want a heartbeat. + + """ + client_heartbeat = self.client_heartbeat or 0 + self.channel_max = args.read_short() or self.channel_max + self.frame_max = args.read_long() or self.frame_max + self.method_writer.frame_max = self.frame_max + self.server_heartbeat = args.read_short() or 0 + + # negotiate the heartbeat interval to the smaller of the + # specified values + if self.server_heartbeat == 0 or client_heartbeat == 0: + self.heartbeat = max(self.server_heartbeat, client_heartbeat) + else: + self.heartbeat = min(self.server_heartbeat, client_heartbeat) + + # Ignore server heartbeat if client_heartbeat is disabled + if not self.client_heartbeat: + self.heartbeat = 0 + + self._x_tune_ok(self.channel_max, self.frame_max, self.heartbeat) + + def send_heartbeat(self): + self.transport.write_frame(8, 0, bytes()) + + def heartbeat_tick(self, rate=2): + """Send heartbeat packets, if necessary, and fail if none have been + received recently. This should be called frequently, on the order of + once per second. 
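In an application event loop this translates into a pattern roughly like the following sketch (heartbeat value and timeout are illustrative):

    import socket
    from amqp.connection import Connection

    conn = Connection(host='localhost:5672', userid='guest', password='guest',
                      heartbeat=10)   # negotiated down to min(client, server) when both are set
    channel = conn.channel()

    while True:
        try:
            conn.drain_events(timeout=1)   # wake up at least once per second
        except socket.timeout:
            pass
        # Sends a heartbeat frame when one is due, and raises ConnectionForced
        # once two heartbeat intervals pass without traffic from the peer.
        conn.heartbeat_tick()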
+ + :keyword rate: Ignored + """ + if not self.heartbeat: + return + + # treat actual data exchange in either direction as a heartbeat + sent_now = self.method_writer.bytes_sent + recv_now = self.method_reader.bytes_recv + if self.prev_sent is None or self.prev_sent != sent_now: + self.last_heartbeat_sent = monotonic() + if self.prev_recv is None or self.prev_recv != recv_now: + self.last_heartbeat_received = monotonic() + self.prev_sent, self.prev_recv = sent_now, recv_now + + # send a heartbeat if it's time to do so + if monotonic() > self.last_heartbeat_sent + self.heartbeat: + self.send_heartbeat() + self.last_heartbeat_sent = monotonic() + + # if we've missed two intervals' heartbeats, fail; this gives the + # server enough time to send heartbeats a little late + if (self.last_heartbeat_received and + self.last_heartbeat_received + 2 * + self.heartbeat < monotonic()): + raise ConnectionForced('Too many heartbeats missed') + + def _x_tune_ok(self, channel_max, frame_max, heartbeat): + """Negotiate connection tuning parameters + + This method sends the client's connection tuning parameters to + the server. Certain fields are negotiated, others provide + capability information. + + PARAMETERS: + channel_max: short + + negotiated maximum channels + + The maximum total number of channels that the client + will use per connection. May not be higher than the + value specified by the server. + + RULE: + + The server MAY ignore the channel-max value or MAY + use it for tuning its resource allocation. + + frame_max: long + + negotiated maximum frame size + + The largest frame size that the client and server will + use for the connection. Zero means that the client + does not impose any specific limit but may reject very + large frames if it cannot allocate resources for them. + Note that the frame-max limit applies principally to + content frames, where large contents can be broken + into frames of arbitrary size. + + RULE: + + Until the frame-max has been negotiated, both + peers must accept frames of up to 4096 octets + large. The minimum non-zero value for the frame- + max field is 4096. + + heartbeat: short + + desired heartbeat delay + + The delay, in seconds, of the connection heartbeat + that the client wants. Zero means the client does not + want a heartbeat. 
+ + """ + args = AMQPWriter() + args.write_short(channel_max) + args.write_long(frame_max) + args.write_short(heartbeat or 0) + self._send_method((10, 31), args) + self._wait_tune_ok = False + + @property + def sock(self): + return self.transport.sock + + @property + def server_capabilities(self): + return self.server_properties.get('capabilities') or {} + + _METHOD_MAP = { + (10, 10): _start, + (10, 20): _secure, + (10, 30): _tune, + (10, 41): _open_ok, + (10, 50): _close, + (10, 51): _close_ok, + (10, 60): _blocked, + (10, 61): _unblocked, + } + + _IMMEDIATE_METHODS = [] + connection_errors = ( + ConnectionError, + socket.error, + IOError, + OSError, + ) + channel_errors = (ChannelError, ) + recoverable_connection_errors = ( + RecoverableConnectionError, + socket.error, + IOError, + OSError, + ) + recoverable_channel_errors = ( + RecoverableChannelError, + ) diff --git a/thesisenv/lib/python3.6/site-packages/amqp/exceptions.py b/thesisenv/lib/python3.6/site-packages/amqp/exceptions.py new file mode 100644 index 0000000..6a0287b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/amqp/exceptions.py @@ -0,0 +1,262 @@ +"""Exceptions used by amqp""" +# Copyright (C) 2007-2008 Barry Pederson +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 +from __future__ import absolute_import + +from struct import pack, unpack + +__all__ = [ + 'AMQPError', + 'ConnectionError', 'ChannelError', + 'RecoverableConnectionError', 'IrrecoverableConnectionError', + 'RecoverableChannelError', 'IrrecoverableChannelError', + 'ConsumerCancelled', 'ContentTooLarge', 'NoConsumers', + 'ConnectionForced', 'InvalidPath', 'AccessRefused', 'NotFound', + 'ResourceLocked', 'PreconditionFailed', 'FrameError', 'FrameSyntaxError', + 'InvalidCommand', 'ChannelNotOpen', 'UnexpectedFrame', 'ResourceError', + 'NotConfirmed', 'NotAllowed', 'AMQPNotImplementedError', 'InternalError', +] + + +class AMQPError(Exception): + code = 0 + + def __init__(self, reply_text=None, method_sig=None, + method_name=None, reply_code=None): + self.message = reply_text + self.reply_code = reply_code or self.code + self.reply_text = reply_text + self.method_sig = method_sig + self.method_name = method_name or '' + if method_sig and not self.method_name: + self.method_name = METHOD_NAME_MAP.get(method_sig, '') + Exception.__init__(self, reply_code, + reply_text, method_sig, self.method_name) + + def __str__(self): + if self.method: + return '{0.method}: ({0.reply_code}) {0.reply_text}'.format(self) + return self.reply_text or '' + + @property + def method(self): + return self.method_name or self.method_sig + + +class ConnectionError(AMQPError): + pass + + +class ChannelError(AMQPError): + pass + + +class RecoverableChannelError(ChannelError): + pass + + +class IrrecoverableChannelError(ChannelError): + pass + + +class RecoverableConnectionError(ConnectionError): + pass + + 
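The recoverable/irrecoverable split is what the error tuples on Connection (connection_errors, recoverable_connection_errors and friends) are built from; retry logic can lean on them roughly like this (consume_forever() is a hypothetical helper and the broker address is a placeholder):

    import time
    from amqp.connection import Connection

    def consume_forever(conn):
        """Hypothetical helper: set up consumers, then drain events until an error."""
        while True:
            conn.drain_events()

    while True:
        try:
            conn = Connection(host='localhost:5672', userid='guest', password='guest')
            consume_forever(conn)
        except Connection.recoverable_connection_errors:
            # e.g. socket.error or ConnectionForced (320): wait and reconnect.
            time.sleep(1)
        except Connection.connection_errors:
            # Irrecoverable, e.g. NotAllowed (530): give up and propagate.
            raise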
+class IrrecoverableConnectionError(ConnectionError): + pass + + +class Blocked(RecoverableConnectionError): + pass + + +class ConsumerCancelled(RecoverableConnectionError): + pass + + +class ContentTooLarge(RecoverableChannelError): + code = 311 + + +class NoConsumers(RecoverableChannelError): + code = 313 + + +class ConnectionForced(RecoverableConnectionError): + code = 320 + + +class InvalidPath(IrrecoverableConnectionError): + code = 402 + + +class AccessRefused(IrrecoverableChannelError): + code = 403 + + +class NotFound(IrrecoverableChannelError): + code = 404 + + +class NotConfirmed(RecoverableConnectionError): + pass + + +class ResourceLocked(RecoverableChannelError): + code = 405 + + +class PreconditionFailed(IrrecoverableChannelError): + code = 406 + + +class FrameError(IrrecoverableConnectionError): + code = 501 + + +class FrameSyntaxError(IrrecoverableConnectionError): + code = 502 + + +class InvalidCommand(IrrecoverableConnectionError): + code = 503 + + +class ChannelNotOpen(IrrecoverableConnectionError): + code = 504 + + +class UnexpectedFrame(IrrecoverableConnectionError): + code = 505 + + +class ResourceError(RecoverableConnectionError): + code = 506 + + +class NotAllowed(IrrecoverableConnectionError): + code = 530 + + +class AMQPNotImplementedError(IrrecoverableConnectionError): + code = 540 + + +class InternalError(IrrecoverableConnectionError): + code = 541 + + +ERROR_MAP = { + 311: ContentTooLarge, + 313: NoConsumers, + 320: ConnectionForced, + 402: InvalidPath, + 403: AccessRefused, + 404: NotFound, + 405: ResourceLocked, + 406: PreconditionFailed, + 501: FrameError, + 502: FrameSyntaxError, + 503: InvalidCommand, + 504: ChannelNotOpen, + 505: UnexpectedFrame, + 506: ResourceError, + 530: NotAllowed, + 540: AMQPNotImplementedError, + 541: InternalError, +} + + +def error_for_code(code, text, method, default): + try: + return ERROR_MAP[code](text, method, reply_code=code) + except KeyError: + return default(text, method, reply_code=code) + + +def raise_for_code(code, text, method, default): + raise error_for_code(code, text, method, default) + + +METHOD_NAME_MAP = { + (10, 10): 'Connection.start', + (10, 11): 'Connection.start_ok', + (10, 20): 'Connection.secure', + (10, 21): 'Connection.secure_ok', + (10, 30): 'Connection.tune', + (10, 31): 'Connection.tune_ok', + (10, 40): 'Connection.open', + (10, 41): 'Connection.open_ok', + (10, 50): 'Connection.close', + (10, 51): 'Connection.close_ok', + (20, 10): 'Channel.open', + (20, 11): 'Channel.open_ok', + (20, 20): 'Channel.flow', + (20, 21): 'Channel.flow_ok', + (20, 40): 'Channel.close', + (20, 41): 'Channel.close_ok', + (30, 10): 'Access.request', + (30, 11): 'Access.request_ok', + (40, 10): 'Exchange.declare', + (40, 11): 'Exchange.declare_ok', + (40, 20): 'Exchange.delete', + (40, 21): 'Exchange.delete_ok', + (40, 30): 'Exchange.bind', + (40, 31): 'Exchange.bind_ok', + (40, 40): 'Exchange.unbind', + (40, 41): 'Exchange.unbind_ok', + (50, 10): 'Queue.declare', + (50, 11): 'Queue.declare_ok', + (50, 20): 'Queue.bind', + (50, 21): 'Queue.bind_ok', + (50, 30): 'Queue.purge', + (50, 31): 'Queue.purge_ok', + (50, 40): 'Queue.delete', + (50, 41): 'Queue.delete_ok', + (50, 50): 'Queue.unbind', + (50, 51): 'Queue.unbind_ok', + (60, 10): 'Basic.qos', + (60, 11): 'Basic.qos_ok', + (60, 20): 'Basic.consume', + (60, 21): 'Basic.consume_ok', + (60, 30): 'Basic.cancel', + (60, 31): 'Basic.cancel_ok', + (60, 40): 'Basic.publish', + (60, 50): 'Basic.return', + (60, 60): 'Basic.deliver', + (60, 70): 'Basic.get', + (60, 71): 
'Basic.get_ok', + (60, 72): 'Basic.get_empty', + (60, 80): 'Basic.ack', + (60, 90): 'Basic.reject', + (60, 100): 'Basic.recover_async', + (60, 110): 'Basic.recover', + (60, 111): 'Basic.recover_ok', + (60, 120): 'Basic.nack', + (90, 10): 'Tx.select', + (90, 11): 'Tx.select_ok', + (90, 20): 'Tx.commit', + (90, 21): 'Tx.commit_ok', + (90, 30): 'Tx.rollback', + (90, 31): 'Tx.rollback_ok', + (85, 10): 'Confirm.select', + (85, 11): 'Confirm.select_ok', +} + + +for _method_id, _method_name in list(METHOD_NAME_MAP.items()): + METHOD_NAME_MAP[unpack('>I', pack('>HH', *_method_id))[0]] = _method_name diff --git a/thesisenv/lib/python3.6/site-packages/amqp/five.py b/thesisenv/lib/python3.6/site-packages/amqp/five.py new file mode 100644 index 0000000..8b281db --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/amqp/five.py @@ -0,0 +1,191 @@ +# -*- coding: utf-8 -*- +""" + celery.five + ~~~~~~~~~~~ + + Compatibility implementations of features + only available in newer Python versions. + + +""" +from __future__ import absolute_import + +# ############# py3k ######################################################### +import sys +PY3 = sys.version_info[0] == 3 + +try: + reload = reload # noqa +except NameError: # pragma: no cover + from imp import reload # noqa + +try: + from UserList import UserList # noqa +except ImportError: # pragma: no cover + from collections import UserList # noqa + +try: + from UserDict import UserDict # noqa +except ImportError: # pragma: no cover + from collections import UserDict # noqa + + +if PY3: + import builtins + + from queue import Queue, Empty + from itertools import zip_longest + from io import StringIO, BytesIO + + map = map + string = str + string_t = str + long_t = int + text_t = str + range = range + int_types = (int, ) + + open_fqdn = 'builtins.open' + + def items(d): + return d.items() + + def keys(d): + return d.keys() + + def values(d): + return d.values() + + def nextfun(it): + return it.__next__ + + exec_ = getattr(builtins, 'exec') + + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + + class WhateverIO(StringIO): + + def write(self, data): + if isinstance(data, bytes): + data = data.encode() + StringIO.write(self, data) + +else: + import __builtin__ as builtins # noqa + from Queue import Queue, Empty # noqa + from itertools import imap as map, izip_longest as zip_longest # noqa + from StringIO import StringIO # noqa + string = unicode # noqa + string_t = basestring # noqa + text_t = unicode + long_t = long # noqa + range = xrange + int_types = (int, long) + + open_fqdn = '__builtin__.open' + + def items(d): # noqa + return d.iteritems() + + def keys(d): # noqa + return d.iterkeys() + + def values(d): # noqa + return d.itervalues() + + def nextfun(it): # noqa + return it.next + + def exec_(code, globs=None, locs=None): + """Execute code in a namespace.""" + if globs is None: + frame = sys._getframe(1) + globs = frame.f_globals + if locs is None: + locs = frame.f_locals + del frame + elif locs is None: + locs = globs + exec("""exec code in globs, locs""") + + exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""") + + BytesIO = WhateverIO = StringIO # noqa + + +def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])): + """Class decorator to set metaclass. + + Works with both Python 3 and Python 3 and it does not add + an extra class in the lookup order like ``six.with_metaclass`` does + (that is -- it copies the original class instead of using inheritance). 
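Used as a decorator, it behaves as sketched here (Meta and Widget are illustrative names, not part of the library):

    from amqp.five import with_metaclass

    class Meta(type):
        """Illustrative metaclass that tags every class it creates."""
        def __new__(mcs, name, bases, attrs):
            attrs.setdefault('registered', True)
            return super(Meta, mcs).__new__(mcs, name, bases, attrs)

    @with_metaclass(Meta)
    class Widget(object):
        pass

    assert type(Widget) is Meta
    assert Widget.registered is True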
+ + """ + + def _clone_with_metaclass(Class): + attrs = dict((key, value) for key, value in items(vars(Class)) + if key not in skip_attrs) + return Type(Class.__name__, Class.__bases__, attrs) + + return _clone_with_metaclass + +# ############# time.monotonic ################################################ + +if sys.version_info < (3, 3): + + import platform + SYSTEM = platform.system() + + try: + import ctypes + except ImportError: # pragma: no cover + ctypes = None # noqa + + if SYSTEM == 'Darwin' and ctypes is not None: + from ctypes.util import find_library + libSystem = ctypes.CDLL(find_library('libSystem.dylib')) + CoreServices = ctypes.CDLL(find_library('CoreServices'), + use_errno=True) + mach_absolute_time = libSystem.mach_absolute_time + mach_absolute_time.restype = ctypes.c_uint64 + absolute_to_nanoseconds = CoreServices.AbsoluteToNanoseconds + absolute_to_nanoseconds.restype = ctypes.c_uint64 + absolute_to_nanoseconds.argtypes = [ctypes.c_uint64] + + def _monotonic(): + return absolute_to_nanoseconds(mach_absolute_time()) * 1e-9 + + elif SYSTEM == 'Linux' and ctypes is not None: + # from stackoverflow: + # questions/1205722/how-do-i-get-monotonic-time-durations-in-python + import os + + CLOCK_MONOTONIC = 1 # see + + class timespec(ctypes.Structure): + _fields_ = [ + ('tv_sec', ctypes.c_long), + ('tv_nsec', ctypes.c_long), + ] + + librt = ctypes.CDLL('librt.so.1', use_errno=True) + clock_gettime = librt.clock_gettime + clock_gettime.argtypes = [ + ctypes.c_int, ctypes.POINTER(timespec), + ] + + def _monotonic(): # noqa + t = timespec() + if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0: + errno_ = ctypes.get_errno() + raise OSError(errno_, os.strerror(errno_)) + return t.tv_sec + t.tv_nsec * 1e-9 + else: + from time import time as _monotonic +try: + from time import monotonic +except ImportError: + monotonic = _monotonic # noqa diff --git a/thesisenv/lib/python3.6/site-packages/amqp/method_framing.py b/thesisenv/lib/python3.6/site-packages/amqp/method_framing.py new file mode 100644 index 0000000..b454524 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/amqp/method_framing.py @@ -0,0 +1,231 @@ +"""Convert between frames and higher-level AMQP methods""" +# Copyright (C) 2007-2008 Barry Pederson +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 +from __future__ import absolute_import + +from collections import defaultdict, deque +from struct import pack, unpack + +from .basic_message import Message +from .exceptions import AMQPError, UnexpectedFrame +from .five import range, string +from .serialization import AMQPReader + +__all__ = ['MethodReader'] + +# +# MethodReader needs to know which methods are supposed +# to be followed by content headers and bodies. 
+# +_CONTENT_METHODS = [ + (60, 50), # Basic.return + (60, 60), # Basic.deliver + (60, 71), # Basic.get_ok +] + + +class _PartialMessage(object): + """Helper class to build up a multi-frame method.""" + + def __init__(self, method_sig, args, channel): + self.method_sig = method_sig + self.args = args + self.msg = Message() + self.body_parts = [] + self.body_received = 0 + self.body_size = None + self.complete = False + + def add_header(self, payload): + class_id, weight, self.body_size = unpack('>HHQ', payload[:12]) + self.msg._load_properties(payload[12:]) + self.complete = (self.body_size == 0) + + def add_payload(self, payload): + parts = self.body_parts + self.body_received += len(payload) + if self.body_received == self.body_size: + if parts: + parts.append(payload) + self.msg.body = bytes().join(parts) + else: + self.msg.body = payload + self.complete = True + else: + parts.append(payload) + + +class MethodReader(object): + """Helper class to receive frames from the broker, combine them if + necessary with content-headers and content-bodies into complete methods. + + Normally a method is represented as a tuple containing + (channel, method_sig, args, content). + + In the case of a framing error, an :exc:`ConnectionError` is placed + in the queue. + + In the case of unexpected frames, a tuple made up of + ``(channel, ChannelError)`` is placed in the queue. + + """ + + def __init__(self, source): + self.source = source + self.queue = deque() + self.running = False + self.partial_messages = {} + self.heartbeats = 0 + # For each channel, which type is expected next + self.expected_types = defaultdict(lambda: 1) + # not an actual byte count, just incremented whenever we receive + self.bytes_recv = 0 + self._quick_put = self.queue.append + self._quick_get = self.queue.popleft + + def _next_method(self): + """Read the next method from the source, once one complete method has + been assembled it is placed in the internal queue.""" + queue = self.queue + put = self._quick_put + read_frame = self.source.read_frame + while not queue: + try: + frame_type, channel, payload = read_frame() + except Exception as exc: + # + # Connection was closed? Framing Error? 
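# (Editorial aside, not part of the vendored method_framing.py: per channel,
# ``expected_types`` (set up in ``__init__`` above) implements the assembly
# state machine sketched in the MethodReader docstring:
#   1 = waiting for a method frame,
#   2 = waiting for a content header (only after a method listed in
#       _CONTENT_METHODS),
#   3 = waiting for content-body frames until ``body_size`` bytes have
#       arrived; the finished (channel, method_sig, args, msg) tuple is then
#       queued and the state returns to 1.)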
+ # + put(exc) + break + + self.bytes_recv += 1 + + if frame_type not in (self.expected_types[channel], 8): + put(( + channel, + UnexpectedFrame( + 'Received frame {0} while expecting type: {1}'.format( + frame_type, self.expected_types[channel])))) + elif frame_type == 1: + self._process_method_frame(channel, payload) + elif frame_type == 2: + self._process_content_header(channel, payload) + elif frame_type == 3: + self._process_content_body(channel, payload) + elif frame_type == 8: + self._process_heartbeat(channel, payload) + + def _process_heartbeat(self, channel, payload): + self.heartbeats += 1 + + def _process_method_frame(self, channel, payload): + """Process Method frames""" + method_sig = unpack('>HH', payload[:4]) + args = AMQPReader(payload[4:]) + + if method_sig in _CONTENT_METHODS: + # + # Save what we've got so far and wait for the content-header + # + self.partial_messages[channel] = _PartialMessage( + method_sig, args, channel, + ) + self.expected_types[channel] = 2 + else: + self._quick_put((channel, method_sig, args, None)) + + def _process_content_header(self, channel, payload): + """Process Content Header frames""" + partial = self.partial_messages[channel] + partial.add_header(payload) + + if partial.complete: + # + # a bodyless message, we're done + # + self._quick_put((channel, partial.method_sig, + partial.args, partial.msg)) + self.partial_messages.pop(channel, None) + self.expected_types[channel] = 1 + else: + # + # wait for the content-body + # + self.expected_types[channel] = 3 + + def _process_content_body(self, channel, payload): + """Process Content Body frames""" + partial = self.partial_messages[channel] + partial.add_payload(payload) + if partial.complete: + # + # Stick the message in the queue and go back to + # waiting for method frames + # + self._quick_put((channel, partial.method_sig, + partial.args, partial.msg)) + self.partial_messages.pop(channel, None) + self.expected_types[channel] = 1 + + def read_method(self): + """Read a method from the peer.""" + self._next_method() + m = self._quick_get() + if isinstance(m, Exception): + raise m + if isinstance(m, tuple) and isinstance(m[1], AMQPError): + raise m[1] + return m + + +class MethodWriter(object): + """Convert AMQP methods into AMQP frames and send them out + to the peer.""" + + def __init__(self, dest, frame_max): + self.dest = dest + self.frame_max = frame_max + self.bytes_sent = 0 + + def write_method(self, channel, method_sig, args, content=None): + write_frame = self.dest.write_frame + payload = pack('>HH', method_sig[0], method_sig[1]) + args + + if content: + # do this early, so we can raise an exception if there's a + # problem with the content properties, before sending the + # first frame + body = content.body + if isinstance(body, string): + coding = content.properties.get('content_encoding', None) + if coding is None: + coding = content.properties['content_encoding'] = 'UTF-8' + + body = body.encode(coding) + properties = content._serialize_properties() + + write_frame(1, channel, payload) + + if content: + payload = pack('>HHQ', method_sig[0], 0, len(body)) + properties + + write_frame(2, channel, payload) + + chunk_size = self.frame_max - 8 + for i in range(0, len(body), chunk_size): + write_frame(3, channel, body[i:i + chunk_size]) + self.bytes_sent += 1 diff --git a/thesisenv/lib/python3.6/site-packages/amqp/protocol.py b/thesisenv/lib/python3.6/site-packages/amqp/protocol.py new file mode 100644 index 0000000..0856eb4 --- /dev/null +++ 
b/thesisenv/lib/python3.6/site-packages/amqp/protocol.py @@ -0,0 +1,13 @@ +from __future__ import absolute_import + +from collections import namedtuple + + +queue_declare_ok_t = namedtuple( + 'queue_declare_ok_t', ('queue', 'message_count', 'consumer_count'), +) + +basic_return_t = namedtuple( + 'basic_return_t', + ('reply_code', 'reply_text', 'exchange', 'routing_key', 'message'), +) diff --git a/thesisenv/lib/python3.6/site-packages/amqp/serialization.py b/thesisenv/lib/python3.6/site-packages/amqp/serialization.py new file mode 100644 index 0000000..4b54336 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/amqp/serialization.py @@ -0,0 +1,509 @@ +""" +Convert between bytestreams and higher-level AMQP types. + +2007-11-05 Barry Pederson + +""" +# Copyright (C) 2007 Barry Pederson +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 +from __future__ import absolute_import + +import calendar +import sys + +from datetime import datetime +from decimal import Decimal +from io import BytesIO +from struct import pack, unpack + +from .exceptions import FrameSyntaxError +from .five import int_types, long_t, string, string_t, items + +IS_PY3K = sys.version_info[0] >= 3 + +if IS_PY3K: + def byte(n): + return bytes([n]) +else: + byte = chr + + +ILLEGAL_TABLE_TYPE_WITH_KEY = """\ +Table type {0!r} for key {1!r} not handled by amqp. [value: {2!r}] +""" + +ILLEGAL_TABLE_TYPE = """\ + Table type {0!r} not handled by amqp. 
[value: {1!r}] +""" + + +class AMQPReader(object): + """Read higher-level AMQP types from a bytestream.""" + def __init__(self, source): + """Source should be either a file-like object with a read() method, or + a plain (non-unicode) string.""" + if isinstance(source, bytes): + self.input = BytesIO(source) + elif hasattr(source, 'read'): + self.input = source + else: + raise ValueError( + 'AMQPReader needs a file-like object or plain string') + + self.bitcount = self.bits = 0 + + def close(self): + self.input.close() + + def read(self, n): + """Read n bytes.""" + self.bitcount = self.bits = 0 + return self.input.read(n) + + def read_bit(self): + """Read a single boolean value.""" + if not self.bitcount: + self.bits = ord(self.input.read(1)) + self.bitcount = 8 + result = (self.bits & 1) == 1 + self.bits >>= 1 + self.bitcount -= 1 + return result + + def read_octet(self): + """Read one byte, return as an integer""" + self.bitcount = self.bits = 0 + return unpack('B', self.input.read(1))[0] + + def read_short(self): + """Read an unsigned 16-bit integer""" + self.bitcount = self.bits = 0 + return unpack('>H', self.input.read(2))[0] + + def read_long(self): + """Read an unsigned 32-bit integer""" + self.bitcount = self.bits = 0 + return unpack('>I', self.input.read(4))[0] + + def read_longlong(self): + """Read an unsigned 64-bit integer""" + self.bitcount = self.bits = 0 + return unpack('>Q', self.input.read(8))[0] + + def read_float(self): + """Read float value.""" + self.bitcount = self.bits = 0 + return unpack('>d', self.input.read(8))[0] + + def read_shortstr(self): + """Read a short string that's stored in up to 255 bytes. + + The encoding isn't specified in the AMQP spec, so + assume it's utf-8 + + """ + self.bitcount = self.bits = 0 + slen = unpack('B', self.input.read(1))[0] + return self.input.read(slen).decode('utf-8') + + def read_longstr(self): + """Read a string that's up to 2**32 bytes. 
+ + The encoding isn't specified in the AMQP spec, so + assume it's utf-8 + + """ + self.bitcount = self.bits = 0 + slen = unpack('>I', self.input.read(4))[0] + return self.input.read(slen).decode('utf-8') + + def read_table(self): + """Read an AMQP table, and return as a Python dictionary.""" + self.bitcount = self.bits = 0 + tlen = unpack('>I', self.input.read(4))[0] + table_data = AMQPReader(self.input.read(tlen)) + result = {} + while table_data.input.tell() < tlen: + name = table_data.read_shortstr() + val = table_data.read_item() + result[name] = val + return result + + def read_item(self): + ftype = ord(self.input.read(1)) + + # 'S': long string + if ftype == 83: + val = self.read_longstr() + # 's': short string + elif ftype == 115: + val = self.read_shortstr() + # 'b': short-short int + elif ftype == 98: + val, = unpack('>B', self.input.read(1)) + # 'B': short-short unsigned int + elif ftype == 66: + val, = unpack('>b', self.input.read(1)) + # 'U': short int + elif ftype == 85: + val, = unpack('>h', self.input.read(2)) + # 'u': short unsigned int + elif ftype == 117: + val, = unpack('>H', self.input.read(2)) + # 'I': long int + elif ftype == 73: + val, = unpack('>i', self.input.read(4)) + # 'i': long unsigned int + elif ftype == 105: # 'l' + val, = unpack('>I', self.input.read(4)) + # 'L': long long int + elif ftype == 76: + val, = unpack('>q', self.input.read(8)) + # 'l': long long unsigned int + elif ftype == 108: + val, = unpack('>Q', self.input.read(8)) + # 'f': float + elif ftype == 102: + val, = unpack('>f', self.input.read(4)) + # 'd': double + elif ftype == 100: + val = self.read_float() + # 'D': decimal + elif ftype == 68: + d = self.read_octet() + n, = unpack('>i', self.input.read(4)) + val = Decimal(n) / Decimal(10 ** d) + # 'F': table + elif ftype == 70: + val = self.read_table() # recurse + # 'A': array + elif ftype == 65: + val = self.read_array() + # 't' (bool) + elif ftype == 116: + val = self.read_bit() + # 'T': timestamp + elif ftype == 84: + val = self.read_timestamp() + # 'V': void + elif ftype == 86: + val = None + else: + raise FrameSyntaxError( + 'Unknown value in table: {0!r} ({1!r})'.format( + ftype, type(ftype))) + return val + + def read_array(self): + array_length = unpack('>I', self.input.read(4))[0] + array_data = AMQPReader(self.input.read(array_length)) + result = [] + while array_data.input.tell() < array_length: + val = array_data.read_item() + result.append(val) + return result + + def read_timestamp(self): + """Read and AMQP timestamp, which is a 64-bit integer representing + seconds since the Unix epoch in 1-second resolution. + + Return as a Python datetime.datetime object, + expressed as localtime. + + """ + return datetime.utcfromtimestamp(self.read_longlong()) + + +class AMQPWriter(object): + """Convert higher-level AMQP types to bytestreams.""" + + def __init__(self, dest=None): + """dest may be a file-type object (with a write() method). 
If None + then a BytesIO is created, and the contents can be accessed with + this class's getvalue() method.""" + self.out = BytesIO() if dest is None else dest + self.bits = [] + self.bitcount = 0 + + def _flushbits(self): + if self.bits: + out = self.out + for b in self.bits: + out.write(pack('B', b)) + self.bits = [] + self.bitcount = 0 + + def close(self): + """Pass through if possible to any file-like destinations.""" + try: + self.out.close() + except AttributeError: + pass + + def flush(self): + """Pass through if possible to any file-like destinations.""" + try: + self.out.flush() + except AttributeError: + pass + + def getvalue(self): + """Get what's been encoded so far if we're working with a BytesIO.""" + self._flushbits() + return self.out.getvalue() + + def write(self, s): + """Write a plain Python string with no special encoding in Python 2.x, + or bytes in Python 3.x""" + self._flushbits() + self.out.write(s) + + def write_bit(self, b): + """Write a boolean value.""" + b = 1 if b else 0 + shift = self.bitcount % 8 + if shift == 0: + self.bits.append(0) + self.bits[-1] |= (b << shift) + self.bitcount += 1 + + def write_octet(self, n): + """Write an integer as an unsigned 8-bit value.""" + if n < 0 or n > 255: + raise FrameSyntaxError( + 'Octet {0!r} out of range 0..255'.format(n)) + self._flushbits() + self.out.write(pack('B', n)) + + def write_short(self, n): + """Write an integer as an unsigned 16-bit value.""" + if n < 0 or n > 65535: + raise FrameSyntaxError( + 'Octet {0!r} out of range 0..65535'.format(n)) + self._flushbits() + self.out.write(pack('>H', int(n))) + + def write_long(self, n): + """Write an integer as an unsigned2 32-bit value.""" + if n < 0 or n >= 4294967296: + raise FrameSyntaxError( + 'Octet {0!r} out of range 0..2**31-1'.format(n)) + self._flushbits() + self.out.write(pack('>I', n)) + + def write_longlong(self, n): + """Write an integer as an unsigned 64-bit value.""" + if n < 0 or n >= 18446744073709551616: + raise FrameSyntaxError( + 'Octet {0!r} out of range 0..2**64-1'.format(n)) + self._flushbits() + self.out.write(pack('>Q', n)) + + def write_shortstr(self, s): + """Write a string up to 255 bytes long (after any encoding). + + If passed a unicode string, encode with UTF-8. + + """ + self._flushbits() + if isinstance(s, string): + s = s.encode('utf-8') + if len(s) > 255: + raise FrameSyntaxError( + 'Shortstring overflow ({0} > 255)'.format(len(s))) + self.write_octet(len(s)) + self.out.write(s) + + def write_longstr(self, s): + """Write a string up to 2**32 bytes long after encoding. + + If passed a unicode string, encode as UTF-8. 
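Editorial aside (an illustrative round trip, not part of the vendored serialization.py; the queue name is made up): the writer methods above mirror the AMQPReader ``read_*`` methods shown earlier, so values can be checked end to end:

    w = AMQPWriter()
    w.write_shortstr('thesis-queue')   # length-prefixed UTF-8, at most 255 bytes
    w.write_long(42)                   # unsigned 32-bit, big-endian
    w.write_bit(True)                  # bits are packed eight to an octet

    r = AMQPReader(w.getvalue())
    assert r.read_shortstr() == 'thesis-queue'
    assert r.read_long() == 42
    assert r.read_bit() is True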
+ + """ + self._flushbits() + if isinstance(s, string): + s = s.encode('utf-8') + self.write_long(len(s)) + self.out.write(s) + + def write_table(self, d): + """Write out a Python dictionary made of up string keys, and values + that are strings, signed integers, Decimal, datetime.datetime, or + sub-dictionaries following the same constraints.""" + self._flushbits() + table_data = AMQPWriter() + for k, v in items(d): + table_data.write_shortstr(k) + table_data.write_item(v, k) + table_data = table_data.getvalue() + self.write_long(len(table_data)) + self.out.write(table_data) + + def write_item(self, v, k=None): + if isinstance(v, (string_t, bytes)): + if isinstance(v, string): + v = v.encode('utf-8') + self.write(b'S') + self.write_longstr(v) + elif isinstance(v, bool): + self.write(pack('>cB', b't', int(v))) + elif isinstance(v, float): + self.write(pack('>cd', b'd', v)) + elif isinstance(v, int_types): + self.write(pack('>ci', b'I', v)) + elif isinstance(v, Decimal): + self.write(b'D') + sign, digits, exponent = v.as_tuple() + v = 0 + for d in digits: + v = (v * 10) + d + if sign: + v = -v + self.write_octet(-exponent) + self.write(pack('>i', v)) + elif isinstance(v, datetime): + self.write(b'T') + self.write_timestamp(v) + elif isinstance(v, dict): + self.write(b'F') + self.write_table(v) + elif isinstance(v, (list, tuple)): + self.write(b'A') + self.write_array(v) + elif v is None: + self.write(b'V') + else: + err = (ILLEGAL_TABLE_TYPE_WITH_KEY.format(type(v), k, v) if k + else ILLEGAL_TABLE_TYPE.format(type(v), v)) + raise FrameSyntaxError(err) + + def write_array(self, a): + array_data = AMQPWriter() + for v in a: + array_data.write_item(v) + array_data = array_data.getvalue() + self.write_long(len(array_data)) + self.out.write(array_data) + + def write_timestamp(self, v): + """Write out a Python datetime.datetime object as a 64-bit integer + representing seconds since the Unix epoch.""" + self.out.write(pack('>Q', long_t(calendar.timegm(v.utctimetuple())))) + + +class GenericContent(object): + """Abstract base class for AMQP content. + + Subclasses should override the PROPERTIES attribute. + + """ + PROPERTIES = [('dummy', 'shortstr')] + + def __init__(self, **props): + """Save the properties appropriate to this AMQP content type + in a 'properties' dictionary.""" + d = {} + for propname, _ in self.PROPERTIES: + if propname in props: + d[propname] = props[propname] + # FIXME: should we ignore unknown properties? 
+ + self.properties = d + + def __eq__(self, other): + """Check if this object has the same properties as another + content object.""" + try: + return self.properties == other.properties + except AttributeError: + return NotImplemented + + def __getattr__(self, name): + """Look for additional properties in the 'properties' + dictionary, and if present - the 'delivery_info' + dictionary.""" + if name == '__setstate__': + # Allows pickling/unpickling to work + raise AttributeError('__setstate__') + + if name in self.properties: + return self.properties[name] + + if 'delivery_info' in self.__dict__ \ + and name in self.delivery_info: + return self.delivery_info[name] + + raise AttributeError(name) + + def _load_properties(self, raw_bytes): + """Given the raw bytes containing the property-flags and property-list + from a content-frame-header, parse and insert into a dictionary + stored in this object as an attribute named 'properties'.""" + r = AMQPReader(raw_bytes) + + # + # Read 16-bit shorts until we get one with a low bit set to zero + # + flags = [] + while 1: + flag_bits = r.read_short() + flags.append(flag_bits) + if flag_bits & 1 == 0: + break + + shift = 0 + d = {} + for key, proptype in self.PROPERTIES: + if shift == 0: + if not flags: + break + flag_bits, flags = flags[0], flags[1:] + shift = 15 + if flag_bits & (1 << shift): + d[key] = getattr(r, 'read_' + proptype)() + shift -= 1 + + self.properties = d + + def _serialize_properties(self): + """serialize the 'properties' attribute (a dictionary) into + the raw bytes making up a set of property flags and a + property list, suitable for putting into a content frame header.""" + shift = 15 + flag_bits = 0 + flags = [] + raw_bytes = AMQPWriter() + for key, proptype in self.PROPERTIES: + val = self.properties.get(key, None) + if val is not None: + if shift == 0: + flags.append(flag_bits) + flag_bits = 0 + shift = 15 + + flag_bits |= (1 << shift) + if proptype != 'bit': + getattr(raw_bytes, 'write_' + proptype)(val) + + shift -= 1 + + flags.append(flag_bits) + result = AMQPWriter() + for flag_bits in flags: + result.write_short(flag_bits) + result.write(raw_bytes.getvalue()) + + return result.getvalue() diff --git a/thesisenv/lib/python3.6/site-packages/amqp/tests/__init__.py b/thesisenv/lib/python3.6/site-packages/amqp/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/amqp/tests/case.py b/thesisenv/lib/python3.6/site-packages/amqp/tests/case.py new file mode 100644 index 0000000..f036b24 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/amqp/tests/case.py @@ -0,0 +1,85 @@ +from __future__ import absolute_import + +import sys + +from functools import wraps +from io import StringIO + +import mock + +from nose import SkipTest # noqa + +try: + import unittest + unittest.skip +except AttributeError: + import unittest2 as unittest # noqa + +PY3 = sys.version_info[0] == 3 + +patch = mock.patch +call = mock.call + + +class Case(unittest.TestCase): + + def assertItemsEqual(self, a, b, *args, **kwargs): + return self.assertEqual(sorted(a), sorted(b), *args, **kwargs) + assertSameElements = assertItemsEqual + + +class Mock(mock.Mock): + + def __init__(self, *args, **kwargs): + attrs = kwargs.pop('attrs', None) or {} + super(Mock, self).__init__(*args, **kwargs) + for attr_name, attr_value in attrs.items(): + setattr(self, attr_name, attr_value) + + +class _ContextMock(Mock): + """Dummy class implementing __enter__ and __exit__ + as the with statement requires these to be 
implemented + in the class, not just the instance.""" + + def __enter__(self): + pass + + def __exit__(self, *exc_info): + pass + + +def ContextMock(*args, **kwargs): + obj = _ContextMock(*args, **kwargs) + obj.attach_mock(Mock(), '__enter__') + obj.attach_mock(Mock(), '__exit__') + obj.__enter__.return_value = obj + # if __exit__ return a value the exception is ignored, + # so it must return None here. + obj.__exit__.return_value = None + return obj + + +class MockPool(object): + + def __init__(self, value=None): + self.value = value or ContextMock() + + def acquire(self, **kwargs): + return self.value + + +def redirect_stdouts(fun): + + @wraps(fun) + def _inner(*args, **kwargs): + sys.stdout = StringIO() + sys.stderr = StringIO() + try: + return fun(*args, **dict(kwargs, + stdout=sys.stdout, stderr=sys.stderr)) + finally: + sys.stdout = sys.__stdout__ + sys.stderr = sys.__stderr__ + + return _inner diff --git a/thesisenv/lib/python3.6/site-packages/amqp/tests/test_channel.py b/thesisenv/lib/python3.6/site-packages/amqp/tests/test_channel.py new file mode 100644 index 0000000..1baa159 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/amqp/tests/test_channel.py @@ -0,0 +1,35 @@ +from __future__ import absolute_import + +from collections import defaultdict + +from amqp.channel import Channel +from amqp.exceptions import NotConfirmed +from amqp.serialization import AMQPWriter, AMQPReader + +from amqp.tests.case import Case, Mock + + +class NoOpenChannel(Channel): + + def _x_open(self): + pass + + +class test_Channel(Case): + + def setUp(self): + self.args = AMQPWriter() + self.connection = Mock(name='connection') + self.connection.channels = defaultdict(lambda: None) + self.channel = NoOpenChannel(self.connection, channel_id=1) + + def test_basic_nack(self, delivery_tag=3172312312): + self.args.write_longlong(delivery_tag) + self.args.write_bit(0) + self.args.write_bit(0) + with self.assertRaises(NotConfirmed): + self.channel._basic_nack(AMQPReader(self.args.getvalue())) + callback = Mock(name='callback') + self.channel.events['basic_nack'].add(callback) + self.channel._basic_nack(AMQPReader(self.args.getvalue())) + callback.assert_called_with(delivery_tag, False, False) diff --git a/thesisenv/lib/python3.6/site-packages/amqp/transport.py b/thesisenv/lib/python3.6/site-packages/amqp/transport.py new file mode 100644 index 0000000..a98a692 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/amqp/transport.py @@ -0,0 +1,299 @@ +# Copyright (C) 2009 Barry Pederson +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 +from __future__ import absolute_import + +import errno +import re +import socket +import ssl + +# Jython does not have this attribute +try: + from socket import SOL_TCP +except ImportError: # pragma: no cover + from socket import IPPROTO_TCP as SOL_TCP # noqa + +try: + from ssl import SSLError +except ImportError: + class SSLError(Exception): # noqa + pass + +from struct import pack, unpack + +from .exceptions import UnexpectedFrame +from .utils import get_errno, set_cloexec + +_UNAVAIL = errno.EAGAIN, errno.EINTR, errno.ENOENT + +AMQP_PORT = 5672 + +EMPTY_BUFFER = bytes() + +# Yes, Advanced Message Queuing Protocol Protocol is redundant +AMQP_PROTOCOL_HEADER = 'AMQP\x01\x01\x00\x09'.encode('latin_1') + +# Match things like: [fe80::1]:5432, from RFC 2732 +IPV6_LITERAL = re.compile(r'\[([\.0-9a-f:]+)\](?::(\d+))?') + + +class _AbstractTransport(object): + """Common superclass for TCP and SSL transports""" + connected = False + + def __init__(self, host, connect_timeout): + self.connected = True + msg = None + port = AMQP_PORT + + m = IPV6_LITERAL.match(host) + if m: + host = m.group(1) + if m.group(2): + port = int(m.group(2)) + else: + if ':' in host: + host, port = host.rsplit(':', 1) + port = int(port) + + self.sock = None + last_err = None + for res in socket.getaddrinfo(host, port, 0, + socket.SOCK_STREAM, SOL_TCP): + af, socktype, proto, canonname, sa = res + try: + self.sock = socket.socket(af, socktype, proto) + try: + set_cloexec(self.sock, True) + except NotImplementedError: + pass + self.sock.settimeout(connect_timeout) + self.sock.connect(sa) + except socket.error as exc: + msg = exc + self.sock.close() + self.sock = None + last_err = msg + continue + break + + if not self.sock: + # Didn't connect, return the most recent error message + raise socket.error(last_err) + + try: + self.sock.settimeout(None) + self.sock.setsockopt(SOL_TCP, socket.TCP_NODELAY, 1) + self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) + + self._setup_transport() + + self._write(AMQP_PROTOCOL_HEADER) + except (OSError, IOError, socket.error) as exc: + if get_errno(exc) not in _UNAVAIL: + self.connected = False + raise + + def __del__(self): + try: + # socket module may have been collected by gc + # if this is called by a thread at shutdown. + if socket is not None: + try: + self.close() + except socket.error: + pass + finally: + self.sock = None + + def _read(self, n, initial=False): + """Read exactly n bytes from the peer""" + raise NotImplementedError('Must be overriden in subclass') + + def _setup_transport(self): + """Do any additional initialization of the class (used + by the subclasses).""" + pass + + def _shutdown_transport(self): + """Do any preliminary work in shutting down the connection.""" + pass + + def _write(self, s): + """Completely write a string to the peer.""" + raise NotImplementedError('Must be overriden in subclass') + + def close(self): + if self.sock is not None: + self._shutdown_transport() + # Call shutdown first to make sure that pending messages + # reach the AMQP broker if the program exits after + # calling this method. 
+ self.sock.shutdown(socket.SHUT_RDWR) + self.sock.close() + self.sock = None + self.connected = False + + def read_frame(self, unpack=unpack): + read = self._read + read_frame_buffer = EMPTY_BUFFER + try: + frame_header = read(7, True) + read_frame_buffer += frame_header + frame_type, channel, size = unpack('>BHI', frame_header) + payload = read(size) + read_frame_buffer += payload + ch = ord(read(1)) + except socket.timeout: + self._read_buffer = read_frame_buffer + self._read_buffer + raise + except (OSError, IOError, socket.error) as exc: + # Don't disconnect for ssl read time outs + # http://bugs.python.org/issue10272 + if isinstance(exc, SSLError) and 'timed out' in str(exc): + raise socket.timeout() + if get_errno(exc) not in _UNAVAIL: + self.connected = False + raise + if ch == 206: # '\xce' + return frame_type, channel, payload + else: + raise UnexpectedFrame( + 'Received 0x{0:02x} while expecting 0xce'.format(ch)) + + def write_frame(self, frame_type, channel, payload): + size = len(payload) + try: + self._write(pack( + '>BHI%dsB' % size, + frame_type, channel, size, payload, 0xce, + )) + except socket.timeout: + raise + except (OSError, IOError, socket.error) as exc: + if get_errno(exc) not in _UNAVAIL: + self.connected = False + raise + + +class SSLTransport(_AbstractTransport): + """Transport that works over SSL""" + + def __init__(self, host, connect_timeout, ssl): + if isinstance(ssl, dict): + self.sslopts = ssl + self._read_buffer = EMPTY_BUFFER + super(SSLTransport, self).__init__(host, connect_timeout) + + def _setup_transport(self): + """Wrap the socket in an SSL object.""" + if hasattr(self, 'sslopts'): + self.sock = ssl.wrap_socket(self.sock, **self.sslopts) + else: + self.sock = ssl.wrap_socket(self.sock) + self.sock.do_handshake() + self._quick_recv = self.sock.read + + def _shutdown_transport(self): + """Unwrap a Python 2.6 SSL socket, so we can call shutdown()""" + if self.sock is not None: + try: + unwrap = self.sock.unwrap + except AttributeError: + return + self.sock = unwrap() + + def _read(self, n, initial=False, + _errnos=(errno.ENOENT, errno.EAGAIN, errno.EINTR)): + # According to SSL_read(3), it can at most return 16kb of data. + # Thus, we use an internal read buffer like TCPTransport._read + # to get the exact number of bytes wanted. + recv = self._quick_recv + rbuf = self._read_buffer + try: + while len(rbuf) < n: + try: + s = recv(n - len(rbuf)) # see note above + except socket.error as exc: + # ssl.sock.read may cause ENOENT if the + # operation couldn't be performed (Issue celery#1414). 
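# (Editorial aside, not part of the vendored transport.py: the wire format
# handled by read_frame()/write_frame() above is a 7-byte header, the
# payload, then a single 0xCE end marker:
#
#     pack('>BHI', frame_type, channel, len(payload)) + payload + b'\xce'
#
# read_frame() unpacks the same '>BHI' header, reads ``size`` payload bytes
# and raises UnexpectedFrame unless the trailing byte is 0xce.)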
+ if not initial and exc.errno in _errnos: + continue + raise + if not s: + raise IOError('Socket closed') + rbuf += s + except: + self._read_buffer = rbuf + raise + result, self._read_buffer = rbuf[:n], rbuf[n:] + return result + + def _write(self, s): + """Write a string out to the SSL socket fully.""" + try: + write = self.sock.write + except AttributeError: + # Works around a bug in python socket library + raise IOError('Socket closed') + else: + while s: + n = write(s) + if not n: + raise IOError('Socket closed') + s = s[n:] + + +class TCPTransport(_AbstractTransport): + """Transport that deals directly with TCP socket.""" + + def _setup_transport(self): + """Setup to _write() directly to the socket, and + do our own buffered reads.""" + self._write = self.sock.sendall + self._read_buffer = EMPTY_BUFFER + self._quick_recv = self.sock.recv + + def _read(self, n, initial=False, _errnos=(errno.EAGAIN, errno.EINTR)): + """Read exactly n bytes from the socket""" + recv = self._quick_recv + rbuf = self._read_buffer + try: + while len(rbuf) < n: + try: + s = recv(n - len(rbuf)) + except socket.error as exc: + if not initial and exc.errno in _errnos: + continue + raise + if not s: + raise IOError('Socket closed') + rbuf += s + except: + self._read_buffer = rbuf + raise + + result, self._read_buffer = rbuf[:n], rbuf[n:] + return result + + +def create_transport(host, connect_timeout, ssl=False): + """Given a few parameters from the Connection constructor, + select and create a subclass of _AbstractTransport.""" + if ssl: + return SSLTransport(host, connect_timeout, ssl) + else: + return TCPTransport(host, connect_timeout) diff --git a/thesisenv/lib/python3.6/site-packages/amqp/utils.py b/thesisenv/lib/python3.6/site-packages/amqp/utils.py new file mode 100644 index 0000000..900d2aa --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/amqp/utils.py @@ -0,0 +1,102 @@ +from __future__ import absolute_import + +import sys + +try: + import fcntl +except ImportError: + fcntl = None # noqa + + +class promise(object): + if not hasattr(sys, 'pypy_version_info'): + __slots__ = tuple( + 'fun args kwargs value ready failed ' + ' on_success on_error calls'.split() + ) + + def __init__(self, fun, args=(), kwargs=(), + on_success=None, on_error=None): + self.fun = fun + self.args = args + self.kwargs = kwargs + self.ready = False + self.failed = False + self.on_success = on_success + self.on_error = on_error + self.value = None + self.calls = 0 + + def __repr__(self): + return '<$: {0.fun.__name__}(*{0.args!r}, **{0.kwargs!r})'.format( + self, + ) + + def __call__(self, *args, **kwargs): + try: + self.value = self.fun( + *self.args + args if self.args else args, + **dict(self.kwargs, **kwargs) if self.kwargs else kwargs + ) + except Exception as exc: + self.set_error_state(exc) + else: + if self.on_success: + self.on_success(self.value) + finally: + self.ready = True + self.calls += 1 + + def then(self, callback=None, on_error=None): + self.on_success = callback + self.on_error = on_error + return callback + + def set_error_state(self, exc): + self.failed = True + if self.on_error is None: + raise + self.on_error(exc) + + def throw(self, exc): + try: + raise exc + except exc.__class__ as with_cause: + self.set_error_state(with_cause) + + +def noop(): + return promise(lambda *a, **k: None) + + +try: + from os import set_cloexec # Python 3.4? 
+except ImportError: + def set_cloexec(fd, cloexec): # noqa + try: + FD_CLOEXEC = fcntl.FD_CLOEXEC + except AttributeError: + raise NotImplementedError( + 'close-on-exec flag not supported on this platform', + ) + flags = fcntl.fcntl(fd, fcntl.F_GETFD) + if cloexec: + flags |= FD_CLOEXEC + else: + flags &= ~FD_CLOEXEC + return fcntl.fcntl(fd, fcntl.F_SETFD, flags) + + +def get_errno(exc): + """:exc:`socket.error` and :exc:`IOError` first got + the ``.errno`` attribute in Py2.7""" + try: + return exc.errno + except AttributeError: + try: + # e.args = (errno, reason) + if isinstance(exc.args, tuple) and len(exc.args) == 2: + return exc.args[0] + except AttributeError: + pass + return 0 diff --git a/thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/PKG-INFO b/thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/PKG-INFO new file mode 100644 index 0000000..c8e8a3d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/PKG-INFO @@ -0,0 +1,85 @@ +Metadata-Version: 1.1 +Name: anyjson +Version: 0.3.3 +Summary: Wraps the best available JSON implementation available in a common interface +Home-page: http://bitbucket.org/runeh/anyjson/ +Author: Rune Halvorsen +Author-email: runefh@gmail.com +License: BSD +Description: ############################## + anyjson - JSON library wrapper + ############################## + + Overview + -------- + + Anyjson loads whichever is the fastest JSON module installed and provides + a uniform API regardless of which JSON implementation is used. + + Originally part of carrot (http://github.com/ask/carrot/) + + Examples + -------- + + To serialize a python object to a JSON string, call the `serialize` function: + + >>> import anyjson + >>> anyjson.serialize(["test", 1, {"foo": 3.141592}, "bar"]) + '["test", 1, {"foo": 3.141592}, "bar"]' + + Conversion the other way is done with the `deserialize` call. + + >>> anyjson.deserialize("""["test", 1, {"foo": 3.141592}, "bar"]""") + ['test', 1, {'foo': 3.1415920000000002}, 'bar'] + + Regardless of the JSON implementation used, the exceptions will be the same. + This means that trying to serialize something not compatible with JSON + raises a TypeError: + + >>> anyjson.serialize([object()]) + Traceback (most recent call last): + + TypeError: object is not JSON encodable + + And deserializing a JSON string with invalid JSON raises a ValueError: + + >>> anyjson.deserialize("""['missing square brace!""") + Traceback (most recent call last): + + ValueError: cannot parse JSON description + + + Contact + ------- + + The module is maintaned by Rune F. Halvorsen . + The project resides at http://bitbucket.org/runeh/anyjson . Bugs and feature + requests can be submitted there. Patches are also very welcome. 
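        One further example, added here for orientation (it is not part of the
        upstream anyjson description, but it mirrors the ``implementation``
        object and the ``force_implementation`` helper defined in
        ``anyjson/__init__.py`` later in this patch). The selected backend can
        be inspected or pinned explicitly:

        >>> import anyjson
        >>> anyjson.force_implementation('json')   # pin the stdlib json module
        >>> anyjson.implementation.name
        'json'
        >>> anyjson.serialize({'ok': True})
        '{"ok": true}'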
+ + Changelog + --------- + + See CHANGELOG file + + License + ------- + + see the LICENSE file + +Keywords: json +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Intended Audience :: Developers +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.4 +Classifier: Programming Language :: Python :: 2.5 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.1 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Python :: Implementation :: Jython diff --git a/thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/SOURCES.txt b/thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/SOURCES.txt new file mode 100644 index 0000000..66078fc --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/SOURCES.txt @@ -0,0 +1,15 @@ +CHANGELOG +LICENSE +MANIFEST.in +README +setup.cfg +setup.py +anyjson/__init__.py +anyjson.egg-info/PKG-INFO +anyjson.egg-info/SOURCES.txt +anyjson.egg-info/dependency_links.txt +anyjson.egg-info/not-zip-safe +anyjson.egg-info/top_level.txt +tests/benchmark.py +tests/test_implementations.py +tests/test_implementations.pyc \ No newline at end of file diff --git a/thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/dependency_links.txt b/thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/installed-files.txt b/thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/installed-files.txt new file mode 100644 index 0000000..69125be --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/installed-files.txt @@ -0,0 +1,7 @@ +../anyjson/__init__.py +../anyjson/__pycache__/__init__.cpython-36.pyc +PKG-INFO +SOURCES.txt +dependency_links.txt +not-zip-safe +top_level.txt diff --git a/thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/not-zip-safe b/thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/not-zip-safe new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/not-zip-safe @@ -0,0 +1 @@ + diff --git a/thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/top_level.txt b/thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/top_level.txt new file mode 100644 index 0000000..93fe7af --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/anyjson-0.3.3-py3.6.egg-info/top_level.txt @@ -0,0 +1 @@ +anyjson diff --git a/thesisenv/lib/python3.6/site-packages/anyjson/__init__.py b/thesisenv/lib/python3.6/site-packages/anyjson/__init__.py new file mode 100644 index 0000000..7d68692 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/anyjson/__init__.py @@ -0,0 +1,142 @@ +"""Wraps the best available JSON implementation available in a common +interface""" + +import sys + +VERSION = 
(0, 3, 3) +__version__ = ".".join(map(str, VERSION[0:3])) + "".join(VERSION[3:]) +__author__ = "Rune Halvorsen" +__contact__ = "runefh@gmail.com" +__homepage__ = "http://bitbucket.org/runeh/anyjson/" +__docformat__ = "restructuredtext" + +# -eof meta- + +#: The json implementation object. This is probably not useful to you, +#: except to get the name of the implementation in use. The name is +#: available through ``implementation.name``. +implementation = None + +# json.loads does not support buffer() objects, +# so we load() and StringIO instead, and it won't copy. +if sys.version_info[0] == 3: + from io import StringIO +else: + try: + from io import StringIO # noqa + except ImportError: + from io import StringIO # noqa + +#: List of known json modules, and the names of their loads/dumps +#: methods, as well as the exceptions they throw. Exception can be either +#: an exception class or a string. +_modules = [("yajl", "dumps", TypeError, "loads", ValueError, "load"), + ("jsonlib2", "write", "WriteError", "read", "ReadError", None), + ("jsonlib", "write", "WriteError", "read", "ReadError", None), + ("simplejson", "dumps", TypeError, "loads", ValueError, "load"), + ("json", "dumps", TypeError, "loads", ValueError, "load"), + ("django.utils.simplejson", "dumps", TypeError, "loads", ValueError, "load"), + ("cjson", "encode", "EncodeError", "decode", "DecodeError", None) + ] + +_fields = ("modname", "encoder", "encerror", + "decoder", "decerror", "filedecoder") + + +class _JsonImplementation(object): + """Incapsulates a JSON implementation""" + + def __init__(self, modspec): + modinfo = dict(list(zip(_fields, modspec))) + + if modinfo["modname"] == "cjson": + import warnings + warnings.warn("cjson is deprecated! See http://pypi.python.org/pypi/python-cjson/1.0.5", DeprecationWarning) + + # No try block. We want importerror to end up at caller + module = self._attempt_load(modinfo["modname"]) + + self.implementation = modinfo["modname"] + self._encode = getattr(module, modinfo["encoder"]) + self._decode = getattr(module, modinfo["decoder"]) + fdec = modinfo["filedecoder"] + self._filedecode = fdec and getattr(module, fdec) + self._encode_error = modinfo["encerror"] + self._decode_error = modinfo["decerror"] + + if isinstance(modinfo["encerror"], str): + self._encode_error = getattr(module, modinfo["encerror"]) + if isinstance(modinfo["decerror"], str): + self._decode_error = getattr(module, modinfo["decerror"]) + + self.name = modinfo["modname"] + + def __repr__(self): + return "<_JsonImplementation instance using %s>" % self.name + + def _attempt_load(self, modname): + """Attempt to load module name modname, returning it on success, + throwing ImportError if module couldn't be imported""" + __import__(modname) + return sys.modules[modname] + + def dumps(self, data): + """Serialize the datastructure to json. Returns a string. Raises + TypeError if the object could not be serialized.""" + try: + return self._encode(data) + except self._encode_error as exc: + raise TypeError(TypeError(*exc.args)).with_traceback(sys.exc_info()[2]) + serialize = dumps + + def loads(self, s): + """deserialize the string to python data types. Raises + ValueError if the string could not be parsed.""" + # uses StringIO to support buffer objects. 
+ try: + if self._filedecode and not isinstance(s, str): + return self._filedecode(StringIO(s)) + return self._decode(s) + except self._decode_error as exc: + raise ValueError(ValueError(*exc.args)).with_traceback(sys.exc_info()[2]) + deserialize = loads + + +def force_implementation(modname): + """Forces anyjson to use a specific json module if it's available""" + global implementation + for name, spec in [(e[0], e) for e in _modules]: + if name == modname: + implementation = _JsonImplementation(spec) + return + raise ImportError("No module named: %s" % modname) + + +if __name__ == "__main__": + # If run as a script, we do nothing but print an error message. + # We do NOT try to load a compatible module because that may throw an + # exception, which renders the package uninstallable with easy_install + # (It trys to execfile the script when installing, to make sure it works) + print("Running anyjson as a stand alone script is not supported") + sys.exit(1) +else: + for modspec in _modules: + try: + implementation = _JsonImplementation(modspec) + break + except ImportError: + pass + else: + raise ImportError("No supported JSON module found") + + + def loads(value): + """Serialize the object to JSON.""" + return implementation.loads(value) + deserialize = loads # compat + + + def dumps(value): + """Deserialize JSON-encoded object to a Python object.""" + return implementation.dumps(value) + serialize = dumps diff --git a/thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/PKG-INFO b/thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/PKG-INFO new file mode 100644 index 0000000..7ce876b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/PKG-INFO @@ -0,0 +1,792 @@ +Metadata-Version: 1.2 +Name: billiard +Version: 3.3.0.23 +Summary: Python multiprocessing fork with improvements and bugfixes +Home-page: http://github.com/celery/billiard +Author: R Oudkerk / Python Software Foundation +Author-email: python-dev@python.org +Maintainer: Ask Solem +Maintainer-email: ask@celeryproject.org +License: BSD +Description: ======== + billiard + ======== + :version: 3.3.0.23 + + About + ----- + + `billiard` is a fork of the Python 2.7 `multiprocessing `_ + package. The multiprocessing package itself is a renamed and updated version of + R Oudkerk's `pyprocessing `_ package. + This standalone variant is intended to be compatible with Python 2.4 and 2.5, + and will draw it's fixes/improvements from python-trunk. + + - This package would not be possible if not for the contributions of not only + the current maintainers but all of the contributors to the original pyprocessing + package listed `here `_ + + - Also it is a fork of the multiprocessin backport package by Christian Heims. + + - It includes the no-execv patch contributed by R. Oudkerk. + + - And the Pool improvements previously located in `Celery`_. + + .. _`Celery`: http://celeryproject.org + + + Bug reporting + ------------- + + Please report bugs related to multiprocessing at the + `Python bug tracker `_. Issues related to billiard + should be reported at http://github.com/celery/billiard/issues. + + + .. image:: https://d2weczhvl823v0.cloudfront.net/celery/billiard/trend.png + :alt: Bitdeli badge + :target: https://bitdeli.com/free + + + =========== + Changes + =========== + + 3.3.0.23 - 2016-03-03 + --------------------- + + - ExceptionInfo: Adds tb_lasti and other missing traceback fields + (Issue #180). + + - monotonic: Now makes sure ctypes is available. 
+ + - PipeConnection: Make sure the pipe is not closed multiple times. + + 3.3.0.22 - 2015-12-08 + --------------------- + + - Wheel packages for Windows now available. + + 3.3.0.21 - 2015-10-26 + --------------------- + + - Pool: Fixed semaphore error on Python3. + + - Fixed libSystem error on OS X El Capitan. + + 3.3.0.20 - 2015-04-17 + --------------------- + + - Pool: Timeouts will attempt to send SIGKILL, but this signal + does not exist on Windows. Replaced with SIGTERM. + + 3.3.0.19 - 2014-10-13 + --------------------- + + - Pool: Exceptions in user timeout callbacks are now logged instead + of crashing the pool. + + Contributed by Pierre Fersing. + + - Pool: Exit codes in errors were improperly being represented as signals. + + - Pool: ``.map``. and ``.imap`` now working again. + + - Now builds on FreeBSD 10. + + Contributed by Michael Fladischer. + + 3.3.0.18 - 2014-06-20 + --------------------- + + - Now compiles on GNU/kFreeBSD + + Contributed by Michael Fladischer. + + - Pool: `AF_PIPE` address fixed so that it works on recent Windows versions + in combination with Python 2.7.7. + + Fix contributed by Joshua Tacoma. + + - Pool: Fix for `Supervisor object has no attribute _children` error. + + Fix contributed by Andres Riancho. + + - Pool: Fixed bug with human_status(None). + + - Pool: shrink did not work properly if asked to remove more than 1 process. + + + 3.3.0.17 - 2014-04-16 + --------------------- + + - Fixes SemLock on Python 3.4 (Issue #107) when using + ``forking_enable(False)``. + + - Pool: Include more useful exitcode information when processes exit. + + 3.3.0.16 - 2014-02-11 + --------------------- + + - Previous release was missing the billiard.py3 package from MANIFEST + so the installation would not work on Python 3. + + 3.3.0.15 - 2014-02-10 + --------------------- + + - Pool: Fixed "cannot join process not started" error. + + - Now uses billiard.py2 and billiard.py3 specific packages that are installed + depending on the python version used. + + This way the installation will not import version specific modules (and + possibly crash). + + 3.3.0.14 - 2014-01-17 + --------------------- + + - Fixed problem with our backwards compatible ``bytes`` wrapper + (Issue #103). + + - No longer expects frozen applications to have a valid ``__file__`` + attribute. + + Fix contributed by George Sibble. + + 3.3.0.13 - 2013-12-13 + --------------------- + + - Fixes compatability with Python < 2.7.6 + + - No longer attempts to handle ``SIGBUS`` + + Contributed by Vishal Vatsa. + + - Non-thread based pool now only handles signals: + + ``SIGHUP``, ``SIGQUIT``, ``SIGTERM``, ``SIGUSR1``, + ``SIGUSR2``. + + - setup.py: Only show compilation warning for build related commands. + + 3.3.0.12 - 2013-12-09 + --------------------- + + - Fixed installation for Python 3. + + Contributed by Rickert Mulder. + + - Pool: Fixed bug with maxtasksperchild. + + Fix contributed by Ionel Cristian Maries. + + - Pool: Fixed bug in maintain_pool. + + 3.3.0.11 - 2013-12-03 + --------------------- + + - Fixed Unicode error when installing the distribution (Issue #89). + + - Daemonic processes are now allowed to have children. + + But note that it will not be possible to automatically + terminate them when the process exits. + + See discussion at https://github.com/celery/celery/issues/1709 + + - Pool: Would not always be able to detect that a process exited. 
+ + + 3.3.0.10 - 2013-12-02 + --------------------- + + - Windows: Fixed problem with missing ``WAITABANDONED_0`` + + Fix contributed by Matthias Wagner + + - Windows: PipeConnection can now be inherited. + + Fix contributed by Matthias Wagner + + 3.3.0.9 - 2013-12-02 + -------------------- + + - Temporary workaround for Celery maxtasksperchild issue. + + Fix contributed by Ionel Cristian Maries. + + 3.3.0.8 - 2013-11-21 + -------------------- + + - Now also sets ``multiprocessing.current_process`` for compatibility + with loggings ``processName`` field. + + 3.3.0.7 - 2013-11-15 + -------------------- + + - Fixed compatibility with PyPy 2.1 + 2.2. + + - Fixed problem in pypy detection. + + Fix contributed by Tin Tvrtkovic. + + - Now uses ``ctypes.find_library`` instead of hardcoded path to find + the OS X CoreServices framework. + + Fix contributed by Moritz Kassner. + + + 3.3.0.6 - 2013-11-12 + -------------------- + + - Now works without C extension again. + + - New ``_billiard.read(fd, buffer, [len, ])`` function + implements os.read with buffer support (new buffer API) + + - New pure-python implementation of ``Connection.send_offset``. + + 3.3.0.5 - 2013-11-11 + -------------------- + + - All platforms except for Windows/PyPy/Jython now requires the C extension. + + 3.3.0.4 - 2013-11-11 + -------------------- + + - Fixed problem with Python3 and setblocking. + + 3.3.0.3 - 2013-11-09 + -------------------- + + - Now works on Windows again. + + 3.3.0.2 - 2013-11-08 + -------------------- + + - ApplyResult.terminate() may be set to signify that the job + must not be executed. It can be used in combination with + Pool.terminate_job. + + - Pipe/_SimpleQueue: Now supports rnonblock/wnonblock arguments + to set the read or write end of the pipe to be nonblocking. + + - Pool: Log message included exception info but exception happened + in another process so the resulting traceback was wrong. + + - Pool: Worker process can now prepare results before they are sent + back to the main process (using ``Worker.prepare_result``). + + 3.3.0.1 - 2013-11-04 + -------------------- + + - Pool: New ``correlation_id`` argument to ``apply_async`` can be + used to set a related id for the ``ApplyResult`` object returned: + + >>> r = pool.apply_async(target, args, kwargs, correlation_id='foo') + >>> r.correlation_id + 'foo' + + - Pool: New callback `on_process_exit` is called when a pool + process exits, with signature ``(pid, exitcode)``. + + Contributed by Daniel M. Taub. + + - Pool: Improved the too many restarts detection. + + 3.3.0.0 - 2013-10-14 + -------------------- + + - Dual code base now runs on Python 2.6+ and Python 3. + + - No longer compatible with Python 2.5 + + - Includes many changes from multiprocessing in 3.4. + + - Now uses ``time.monotonic`` when available, also including + fallback implementations for Linux and OS X. + + - No longer cleans up after receiving SIGILL, SIGSEGV or SIGFPE + + Contributed by Kevin Blackham + + - ``Finalize`` and ``register_after_fork`` is now aliases to multiprocessing. + + It's better to import these from multiprocessing directly now + so that there aren't multiple registries. + + - New `billiard.queues._SimpleQueue` that does not use semaphores. + + - Pool: Can now be extended to support using multiple IPC queues. + + - Pool: Can now use async I/O to write to pool IPC queues. + + - Pool: New ``Worker.on_loop_stop`` handler can be used to add actions + at pool worker process shutdown. 
+ + Note that, like all finalization handlers, there is no guarantee that + this will be executed. + + Contributed by dmtaub. + + 2.7.3.30 - 2013-06-28 + --------------------- + + - Fixed ImportError in billiard._ext + + 2.7.3.29 - 2013-06-28 + --------------------- + + - Compilation: Fixed improper handling of HAVE_SEM_OPEN (Issue #55) + + Fix contributed by Krzysztof Jagiello. + + - Process now releases logging locks after fork. + + This previously happened in Pool, but it was done too late + as processes logs when they bootstrap. + + - Pool.terminate_job now ignores `No such process` errors. + + - billiard.Pool entrypoint did not support new arguments + to billiard.pool.Pool + + - Connection inbound buffer size increased from 1kb to 128kb. + + - C extension cleaned up by properly adding a namespace to symbols. + + - _exit_function now works even if thread wakes up after gc collect. + + 2.7.3.28 - 2013-04-16 + --------------------- + + - Pool: Fixed regression that disabled the deadlock + fix in 2.7.3.24 + + - Pool: RestartFreqExceeded could be raised prematurely. + + - Process: Include pid in startup and process INFO logs. + + 2.7.3.27 - 2013-04-12 + --------------------- + + - Manager now works again. + + - Python 3 fixes for billiard.connection. + + - Fixed invalid argument bug when running on Python 3.3 + + Fix contributed by Nathan Wan. + + - Ignore OSError when setting up signal handlers. + + 2.7.3.26 - 2013-04-09 + --------------------- + + - Pool: Child processes must ignore SIGINT. + + 2.7.3.25 - 2013-04-09 + --------------------- + + - Pool: 2.7.3.24 broke support for subprocesses (Issue #48). + + Signals that should be ignored were instead handled + by terminating. + + 2.7.3.24 - 2013-04-08 + --------------------- + + - Pool: Make sure finally blocks are called when process exits + due to a signal. + + This fixes a deadlock problem when the process is killed + while having acquired the shared semaphore. However, this solution + does not protect against the processes being killed, a more elaborate + solution is required for that. Hopefully this will be fixed soon in a + later version. + + - Pool: Can now use GDB to debug pool child processes. + + - Fixes Python 3 compatibility problems. + + Contributed by Albertas Agejevas. + + 2.7.3.23 - 2013-03-22 + --------------------- + + - Windows: Now catches SystemExit from setuptools while trying to build + the C extension (Issue #41). + + 2.7.3.22 - 2013-03-08 + --------------------- + + - Pool: apply_async now supports a ``callbacks_propagate`` keyword + argument that can be a tuple of exceptions to propagate in callbacks. + (callback, errback, accept_callback, timeout_callback). + + - Errors are no longer logged for OK and recycle exit codes. + + This would cause normal maxtasksperchild recycled process + to log an error. + + - Fixed Python 2.5 compatibility problem (Issue #33). + + - FreeBSD: Compilation now disables semaphores if Python was built + without it (Issue #40). + + Contributed by William Grzybowski + + 2.7.3.21 - 2013-02-11 + --------------------- + + - Fixed typo EX_REUSE -> EX_RECYCLE + + - Code now conforms to new pep8.py rules. + + 2.7.3.20 - 2013-02-08 + --------------------- + + - Pool: Disable restart limit if maxR is not set. + + - Pool: Now uses os.kill instead of signal.signal. + + Contributed by Lukasz Langa + + - Fixed name error in process.py + + - Pool: ApplyResult.get now properly raises exceptions. + + Fix contributed by xentac. 
+ + 2.7.3.19 - 2012-11-30 + --------------------- + + - Fixes problem at shutdown when gc has collected symbols. + + - Pool now always uses _kill for Py2.5 compatibility on Windows (Issue #32). + + - Fixes Python 3 compatibility issues + + 2.7.3.18 - 2012-11-05 + --------------------- + + - [Pool] Fix for check_timeouts if not set. + + Fix contributed by Dmitry Sukhov + + - Fixed pickle problem with Traceback. + + Code.frame.__loader__ is now ignored as it may be set to + an unpickleable object. + + - The Django old-layout warning was always showing. + + 2.7.3.17 - 2012-09-26 + --------------------- + + - Fixes typo + + 2.7.3.16 - 2012-09-26 + --------------------- + + - Windows: Fixes for SemLock._rebuild (Issue #24). + + - Pool: Job terminated with terminate_job now raises + billiard.exceptions.Terminated. + + 2.7.3.15 - 2012-09-21 + --------------------- + + - Windows: Fixes unpickling of SemLock when using fallback. + + - Windows: Fixes installation when no C compiler. + + 2.7.3.14 - 2012-09-20 + --------------------- + + - Installation now works again for Python 3. + + 2.7.3.13 - 2012-09-14 + --------------------- + + - Merged with Python trunk (many authors, many fixes: see Python changelog in + trunk). + + - Using execv now also works with older Django projects using setup_environ + (Issue #10). + + - Billiard now installs with a warning that the C extension could not be built + if a compiler is not installed or the build fails in some other way. + + It really is recommended to have the C extension installed when running + with force execv, but this change also makes it easier to install. + + - Pool: Hard timeouts now sends KILL shortly after TERM so that C extensions + cannot block the signal. + + Python signal handlers are called in the interpreter, so they cannot + be called while a C extension is blocking the interpreter from running. + + - Now uses a timeout value for Thread.join that doesn't exceed the maximum + on some platforms. + + - Fixed bug in the SemLock fallback used when C extensions not installed. + + Fix contributed by Mher Movsisyan. + + - Pool: Now sets a Process.index attribute for every process in the pool. + + This number will always be between 0 and concurrency-1, and + can be used to e.g. create a logfile for each process in the pool + without creating a new logfile whenever a process is replaced. + + 2.7.3.12 - 2012-08-05 + --------------------- + + - Fixed Python 2.5 compatibility issue. + + - New Pool.terminate_job(pid) to terminate a job without raising WorkerLostError + + 2.7.3.11 - 2012-08-01 + --------------------- + + - Adds support for FreeBSD 7+ + + Fix contributed by koobs. + + - Pool: New argument ``allow_restart`` is now required to enable + the pool process sentinel that is required to restart the pool. + + It's disabled by default, which reduces the number of file + descriptors/semaphores required to run the pool. + + - Pool: Now emits a warning if a worker process exited with error-code. + + But not if the error code is 155, which is now returned if the worker + process was recycled (maxtasksperchild). + + - Python 3 compatibility fixes. + + - Python 2.5 compatibility fixes. + + 2.7.3.10 - 2012-06-26 + --------------------- + + - The ``TimeLimitExceeded`` exception string representation + only included the seconds as a number, it now gives a more human + friendly description. + + - Fixed typo in ``LaxBoundedSemaphore.shrink``. + + - Pool: ``ResultHandler.handle_event`` no longer requires + any arguments. 
+ + - setup.py bdist now works + + 2.7.3.9 - 2012-06-03 + -------------------- + + - Environment variable ``MP_MAIN_FILE`` envvar is now set to + the path of the ``__main__`` module when execv is enabled. + + - Pool: Errors occurring in the TaskHandler are now reported. + + 2.7.3.8 - 2012-06-01 + -------------------- + + - Can now be installed on Py 3.2 + + - Issue #12091: simplify ApplyResult and MapResult with threading.Event + + Patch by Charles-Francois Natali + + - Pool: Support running without TimeoutHandler thread. + + - The with_*_thread arguments has also been replaced with + a single `threads=True` argument. + + - Two new pool callbacks: + + - ``on_timeout_set(job, soft, hard)`` + + Applied when a task is executed with a timeout. + + - ``on_timeout_cancel(job)`` + + Applied when a timeout is cancelled (the job completed) + + 2.7.3.7 - 2012-05-21 + -------------------- + + - Fixes Python 2.5 support. + + 2.7.3.6 - 2012-05-21 + -------------------- + + - Pool: Can now be used in an event loop, without starting the supporting + threads (TimeoutHandler still not supported) + + To facilitate this the pool has gained the following keyword arguments: + + - ``with_task_thread`` + - ``with_result_thread`` + - ``with_supervisor_thread`` + - ``on_process_up`` + + Callback called with Process instance as argument + whenever a new worker process is added. + + Used to add new process fds to the eventloop:: + + def on_process_up(proc): + hub.add_reader(proc.sentinel, pool.maintain_pool) + + - ``on_process_down`` + + Callback called with Process instance as argument + whenever a new worker process is found dead. + + Used to remove process fds from the eventloop:: + + def on_process_down(proc): + hub.remove(proc.sentinel) + + - ``semaphore`` + + Sets the semaphore used to protect from adding new items to the + pool when no processes available. The default is a threaded + one, so this can be used to change to an async semaphore. + + And the following attributes:: + + - ``readers`` + + A map of ``fd`` -> ``callback``, to be registered in an eventloop. + Currently this is only the result outqueue with a callback + that processes all currently incoming results. + + And the following methods:: + + - ``did_start_ok`` + + To be called after starting the pool, and after setting up the + eventloop with the pool fds, to ensure that the worker processes + didn't immediately exit caused by an error (internal/memory). + + - ``maintain_pool`` + + Public version of ``_maintain_pool`` that handles max restarts. + + - Pool: Process too frequent restart protection now only counts if the process + had a non-successful exit-code. + + This to take into account the maxtasksperchild option, and allowing + processes to exit cleanly on their own. + + - Pool: New options max_restart + max_restart_freq + + This means that the supervisor can't restart processes + faster than max_restart' times per max_restart_freq seconds + (like the Erlang supervisor maxR & maxT settings). + + The pool is closed and joined if the max restart + frequency is exceeded, where previously it would keep restarting + at an unlimited rate, possibly crashing the system. + + The current default value is to stop if it exceeds + 100 * process_count restarts in 1 seconds. This may change later. + + It will only count processes with an unsuccessful exit code, + this is to take into account the ``maxtasksperchild`` setting + and code that voluntarily exits. 
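+
+The ``on_timeout_set``/``on_timeout_cancel`` callbacks described under
+2.7.3.8 above map onto keyword arguments of ``billiard.Pool`` (see
+``billiard/__init__.py`` later in this patch). A minimal sketch, assuming a
+Unix platform where the signal-based soft timeout can interrupt the worker;
+the ``crunch`` function and the limits are illustrative::
+
+    from billiard import Pool
+    from billiard.exceptions import SoftTimeLimitExceeded
+
+    def crunch(n):
+        try:
+            while True:          # pretend to work forever
+                n += 1
+        except SoftTimeLimitExceeded:
+            return 'cleaned up after soft timeout'
+
+    def on_timeout_set(job, soft, hard):
+        print('timeouts set: soft=%s hard=%s' % (soft, hard))
+
+    def on_timeout_cancel(job):
+        print('job finished before the hard timeout')
+
+    if __name__ == '__main__':
+        pool = Pool(processes=2, soft_timeout=3, timeout=10,
+                    on_timeout_set=on_timeout_set,
+                    on_timeout_cancel=on_timeout_cancel)
+        print(pool.apply_async(crunch, (0, )).get())
+        pool.close()
+        pool.join()
+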
+ + - Pool: The ``WorkerLostError`` message now includes the exit-code of the + process that disappeared. + + + 2.7.3.5 - 2012-05-09 + -------------------- + + - Now always cleans up after ``sys.exc_info()`` to avoid + cyclic references. + + - ExceptionInfo without arguments now defaults to ``sys.exc_info``. + + - Forking can now be disabled using the + ``MULTIPROCESSING_FORKING_DISABLE`` environment variable. + + Also this envvar is set so that the behavior is inherited + after execv. + + - The semaphore cleanup process started when execv is used + now sets a useful process name if the ``setproctitle`` + module is installed. + + - Sets the ``FORKED_BY_MULTIPROCESSING`` + environment variable if forking is disabled. + + + 2.7.3.4 - 2012-04-27 + -------------------- + + - Added `billiard.ensure_multiprocessing()` + + Raises NotImplementedError if the platform does not support + multiprocessing (e.g. Jython). + + + 2.7.3.3 - 2012-04-23 + -------------------- + + - PyPy now falls back to using its internal _multiprocessing module, + so everything works except for forking_enable(False) (which + silently degrades). + + - Fixed Python 2.5 compat. issues. + + - Uses more with statements + + - Merged some of the changes from the Python 3 branch. + + 2.7.3.2 - 2012-04-20 + -------------------- + + - Now installs on PyPy/Jython (but does not work). + + 2.7.3.1 - 2012-04-20 + -------------------- + + - Python 2.5 support added. + + 2.7.3.0 - 2012-04-20 + -------------------- + + - Updated from Python 2.7.3 + + - Python 2.4 support removed, now only supports 2.5, 2.6 and 2.7. + (may consider py3k support at some point). + + - Pool improvements from Celery. + + - no-execv patch added (http://bugs.python.org/issue8713) + +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Programming Language :: Python +Classifier: Programming Language :: C +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.5 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.2 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: Jython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX +Classifier: License :: OSI Approved :: BSD License +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: System :: Distributed Computing diff --git a/thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/SOURCES.txt b/thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/SOURCES.txt new file mode 100644 index 0000000..aa6f1a9 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/SOURCES.txt @@ -0,0 +1,71 @@ +CHANGES.txt +INSTALL.txt +LICENSE.txt +MANIFEST.in +Makefile +README.rst +pip-delete-this-directory.txt +setup.cfg +setup.py +Doc/conf.py +Doc/glossary.rst +Doc/index.rst +Doc/includes/__init__.py +Doc/includes/mp_benchmarks.py +Doc/includes/mp_newtype.py +Doc/includes/mp_pool.py +Doc/includes/mp_synchronize.py +Doc/includes/mp_webserver.py +Doc/includes/mp_workers.py +Doc/library/multiprocessing.rst 
+Modules/_billiard/connection.h +Modules/_billiard/multiprocessing.c +Modules/_billiard/multiprocessing.h +Modules/_billiard/pipe_connection.c +Modules/_billiard/semaphore.c +Modules/_billiard/socket_connection.c +Modules/_billiard/win32_functions.c +billiard/__init__.py +billiard/_ext.py +billiard/_win.py +billiard/common.py +billiard/compat.py +billiard/connection.py +billiard/einfo.py +billiard/exceptions.py +billiard/five.py +billiard/forking.py +billiard/heap.py +billiard/managers.py +billiard/pool.py +billiard/process.py +billiard/queues.py +billiard/reduction.py +billiard/sharedctypes.py +billiard/synchronize.py +billiard/util.py +billiard.egg-info/PKG-INFO +billiard.egg-info/SOURCES.txt +billiard.egg-info/dependency_links.txt +billiard.egg-info/not-zip-safe +billiard.egg-info/top_level.txt +billiard/dummy/__init__.py +billiard/dummy/connection.py +billiard/py2/__init__.py +billiard/py2/connection.py +billiard/py2/reduction.py +billiard/py3/__init__.py +billiard/py3/connection.py +billiard/py3/reduction.py +billiard/tests/__init__.py +billiard/tests/compat.py +billiard/tests/test_common.py +billiard/tests/test_package.py +billiard/tests/utils.py +funtests/__init__.py +funtests/setup.py +funtests/tests/__init__.py +funtests/tests/test_multiprocessing.py +requirements/test-ci.txt +requirements/test.txt +requirements/test3.txt \ No newline at end of file diff --git a/thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/dependency_links.txt b/thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/installed-files.txt b/thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/installed-files.txt new file mode 100644 index 0000000..a3a8eba --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/installed-files.txt @@ -0,0 +1,67 @@ +../billiard/__init__.py +../billiard/__pycache__/__init__.cpython-36.pyc +../billiard/__pycache__/_ext.cpython-36.pyc +../billiard/__pycache__/_win.cpython-36.pyc +../billiard/__pycache__/common.cpython-36.pyc +../billiard/__pycache__/compat.cpython-36.pyc +../billiard/__pycache__/connection.cpython-36.pyc +../billiard/__pycache__/einfo.cpython-36.pyc +../billiard/__pycache__/exceptions.cpython-36.pyc +../billiard/__pycache__/five.cpython-36.pyc +../billiard/__pycache__/forking.cpython-36.pyc +../billiard/__pycache__/heap.cpython-36.pyc +../billiard/__pycache__/managers.cpython-36.pyc +../billiard/__pycache__/pool.cpython-36.pyc +../billiard/__pycache__/process.cpython-36.pyc +../billiard/__pycache__/queues.cpython-36.pyc +../billiard/__pycache__/reduction.cpython-36.pyc +../billiard/__pycache__/sharedctypes.cpython-36.pyc +../billiard/__pycache__/synchronize.cpython-36.pyc +../billiard/__pycache__/util.cpython-36.pyc +../billiard/_ext.py +../billiard/_win.py +../billiard/common.py +../billiard/compat.py +../billiard/connection.py +../billiard/dummy/__init__.py +../billiard/dummy/__pycache__/__init__.cpython-36.pyc +../billiard/dummy/__pycache__/connection.cpython-36.pyc +../billiard/dummy/connection.py +../billiard/einfo.py +../billiard/exceptions.py +../billiard/five.py +../billiard/forking.py +../billiard/heap.py +../billiard/managers.py +../billiard/pool.py +../billiard/process.py 
+../billiard/py3/__init__.py +../billiard/py3/__pycache__/__init__.cpython-36.pyc +../billiard/py3/__pycache__/connection.cpython-36.pyc +../billiard/py3/__pycache__/reduction.cpython-36.pyc +../billiard/py3/connection.py +../billiard/py3/reduction.py +../billiard/queues.py +../billiard/reduction.py +../billiard/sharedctypes.py +../billiard/synchronize.py +../billiard/tests/__init__.py +../billiard/tests/__pycache__/__init__.cpython-36.pyc +../billiard/tests/__pycache__/compat.cpython-36.pyc +../billiard/tests/__pycache__/test_common.cpython-36.pyc +../billiard/tests/__pycache__/test_package.cpython-36.pyc +../billiard/tests/__pycache__/utils.cpython-36.pyc +../billiard/tests/compat.py +../billiard/tests/test_common.py +../billiard/tests/test_package.py +../billiard/tests/utils.py +../billiard/util.py +../funtests/__init__.py +../funtests/__pycache__/__init__.cpython-36.pyc +../funtests/__pycache__/setup.cpython-36.pyc +../funtests/setup.py +PKG-INFO +SOURCES.txt +dependency_links.txt +not-zip-safe +top_level.txt diff --git a/thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/not-zip-safe b/thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/not-zip-safe new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/not-zip-safe @@ -0,0 +1 @@ + diff --git a/thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/top_level.txt b/thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/top_level.txt new file mode 100644 index 0000000..3b5018e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard-3.3.0.23-py3.6.egg-info/top_level.txt @@ -0,0 +1,2 @@ +billiard +funtests diff --git a/thesisenv/lib/python3.6/site-packages/billiard/__init__.py b/thesisenv/lib/python3.6/site-packages/billiard/__init__.py new file mode 100644 index 0000000..b0fe082 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/__init__.py @@ -0,0 +1,323 @@ +"""Python multiprocessing fork with improvements and bugfixes""" +# +# Package analogous to 'threading.py' but using processes +# +# multiprocessing/__init__.py +# +# This package is intended to duplicate the functionality (and much of +# the API) of threading.py but uses processes instead of threads. A +# subpackage 'multiprocessing.dummy' has the same API but is a simple +# wrapper for 'threading'. +# +# Try calling `multiprocessing.doc.main()` to read the html +# documentation in a webbrowser. +# +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. 
+# + +from __future__ import absolute_import + +import os +import sys +import warnings + +from .exceptions import ( # noqa + ProcessError, + BufferTooShort, + TimeoutError, + AuthenticationError, + TimeLimitExceeded, + SoftTimeLimitExceeded, + WorkerLostError, +) +from .process import Process, current_process, active_children +from .util import SUBDEBUG, SUBWARNING + +VERSION = (3, 3, 0, 23) +__version__ = '.'.join(map(str, VERSION[0:4])) + "".join(VERSION[4:]) +__author__ = 'R Oudkerk / Python Software Foundation' +__author_email__ = 'python-dev@python.org' +__maintainer__ = 'Ask Solem' +__contact__ = "ask@celeryproject.org" +__homepage__ = "http://github.com/celery/billiard" +__docformat__ = "restructuredtext" + +# -eof meta- + +__all__ = [ + 'Process', 'current_process', 'active_children', 'freeze_support', + 'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger', + 'allow_connection_pickling', 'BufferTooShort', 'TimeoutError', + 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', + 'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array', + 'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING', 'set_executable', + 'forking_enable', 'forking_is_enabled' +] + + +def ensure_multiprocessing(): + from ._ext import ensure_multiprocessing + return ensure_multiprocessing() + + +W_NO_EXECV = """\ +force_execv is not supported as the billiard C extension \ +is not installed\ +""" + +# +# Definitions not depending on native semaphores +# + + +def Manager(): + ''' + Returns a manager associated with a running server process + + The managers methods such as `Lock()`, `Condition()` and `Queue()` + can be used to create shared objects. + ''' + from .managers import SyncManager + m = SyncManager() + m.start() + return m + + +def Pipe(duplex=True, rnonblock=False, wnonblock=False): + ''' + Returns two connection object connected by a pipe + ''' + from billiard.connection import Pipe + return Pipe(duplex, rnonblock, wnonblock) + + +def cpu_count(): + ''' + Returns the number of CPUs in the system + ''' + if sys.platform == 'win32': + try: + num = int(os.environ['NUMBER_OF_PROCESSORS']) + except (ValueError, KeyError): + num = 0 + elif 'bsd' in sys.platform or sys.platform == 'darwin': + comm = '/sbin/sysctl -n hw.ncpu' + if sys.platform == 'darwin': + comm = '/usr' + comm + try: + with os.popen(comm) as p: + num = int(p.read()) + except ValueError: + num = 0 + else: + try: + num = os.sysconf('SC_NPROCESSORS_ONLN') + except (ValueError, OSError, AttributeError): + num = 0 + + if num >= 1: + return num + else: + raise NotImplementedError('cannot determine number of cpus') + + +def freeze_support(): + ''' + Check whether this is a fake forked process in a frozen executable. + If so then run code specified by commandline and exit. + ''' + if sys.platform == 'win32' and getattr(sys, 'frozen', False): + from .forking import freeze_support + freeze_support() + + +def get_logger(): + ''' + Return package logger -- if it does not already exist then it is created + ''' + from .util import get_logger + return get_logger() + + +def log_to_stderr(level=None): + ''' + Turn on logging and add a handler which prints to stderr + ''' + from .util import log_to_stderr + return log_to_stderr(level) + + +def allow_connection_pickling(): + ''' + Install support for sending connections and sockets between processes + ''' + from . 
import reduction # noqa + +# +# Definitions depending on native semaphores +# + + +def Lock(): + ''' + Returns a non-recursive lock object + ''' + from .synchronize import Lock + return Lock() + + +def RLock(): + ''' + Returns a recursive lock object + ''' + from .synchronize import RLock + return RLock() + + +def Condition(lock=None): + ''' + Returns a condition object + ''' + from .synchronize import Condition + return Condition(lock) + + +def Semaphore(value=1): + ''' + Returns a semaphore object + ''' + from .synchronize import Semaphore + return Semaphore(value) + + +def BoundedSemaphore(value=1): + ''' + Returns a bounded semaphore object + ''' + from .synchronize import BoundedSemaphore + return BoundedSemaphore(value) + + +def Event(): + ''' + Returns an event object + ''' + from .synchronize import Event + return Event() + + +def Queue(maxsize=0): + ''' + Returns a queue object + ''' + from .queues import Queue + return Queue(maxsize) + + +def JoinableQueue(maxsize=0): + ''' + Returns a queue object + ''' + from .queues import JoinableQueue + return JoinableQueue(maxsize) + + +def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None, + timeout=None, soft_timeout=None, lost_worker_timeout=None, + max_restarts=None, max_restart_freq=1, on_process_up=None, + on_process_down=None, on_timeout_set=None, on_timeout_cancel=None, + threads=True, semaphore=None, putlocks=False, allow_restart=False): + ''' + Returns a process pool object + ''' + from .pool import Pool + return Pool(processes, initializer, initargs, maxtasksperchild, + timeout, soft_timeout, lost_worker_timeout, + max_restarts, max_restart_freq, on_process_up, + on_process_down, on_timeout_set, on_timeout_cancel, + threads, semaphore, putlocks, allow_restart) + + +def RawValue(typecode_or_type, *args): + ''' + Returns a shared object + ''' + from .sharedctypes import RawValue + return RawValue(typecode_or_type, *args) + + +def RawArray(typecode_or_type, size_or_initializer): + ''' + Returns a shared array + ''' + from .sharedctypes import RawArray + return RawArray(typecode_or_type, size_or_initializer) + + +def Value(typecode_or_type, *args, **kwds): + ''' + Returns a synchronized shared object + ''' + from .sharedctypes import Value + return Value(typecode_or_type, *args, **kwds) + + +def Array(typecode_or_type, size_or_initializer, **kwds): + ''' + Returns a synchronized shared array + ''' + from .sharedctypes import Array + return Array(typecode_or_type, size_or_initializer, **kwds) + +# +# +# + + +def set_executable(executable): + ''' + Sets the path to a python.exe or pythonw.exe binary used to run + child processes on Windows instead of sys.executable. + Useful for people embedding Python. + ''' + from .forking import set_executable + set_executable(executable) + + +def forking_is_enabled(): + ''' + Returns a boolean value indicating whether billiard is + currently set to create child processes by forking the current + python process rather than by starting a new instances of python. + + On Windows this always returns `False`. On Unix it returns `True` by + default. + ''' + from . import forking + return forking._forking_is_enabled + + +def forking_enable(value): + ''' + Enable/disable creation of child process by forking the current process. + + `value` should be a boolean value. If `value` is true then + forking is enabled. If `value` is false then forking is disabled. + On systems with `os.fork()` forking is enabled by default, and on + other systems it is always disabled. 
+ ''' + if not value: + from ._ext import supports_exec + if supports_exec: + from . import forking + if value and not hasattr(os, 'fork'): + raise ValueError('os.fork() not found') + forking._forking_is_enabled = bool(value) + if not value: + os.environ["MULTIPROCESSING_FORKING_DISABLE"] = "1" + else: + warnings.warn(RuntimeWarning(W_NO_EXECV)) +if os.environ.get("MULTIPROCESSING_FORKING_DISABLE"): + forking_enable(False) diff --git a/thesisenv/lib/python3.6/site-packages/billiard/_ext.py b/thesisenv/lib/python3.6/site-packages/billiard/_ext.py new file mode 100644 index 0000000..fb2c055 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/_ext.py @@ -0,0 +1,40 @@ +from __future__ import absolute_import + +import sys + +supports_exec = True + +from .compat import _winapi as win32 # noqa + +if sys.platform.startswith("java"): + _billiard = None +else: + try: + import _billiard # noqa + except ImportError: + import _multiprocessing as _billiard # noqa + supports_exec = False + try: + Connection = _billiard.Connection + except AttributeError: # Py3 + from billiard.connection import Connection # noqa + + PipeConnection = getattr(_billiard, "PipeConnection", None) + + +def ensure_multiprocessing(): + if _billiard is None: + raise NotImplementedError("multiprocessing not supported") + + +def ensure_SemLock(): + try: + from _billiard import SemLock # noqa + except ImportError: + try: + from _multiprocessing import SemLock # noqa + except ImportError: + raise ImportError("""\ +This platform lacks a functioning sem_open implementation, therefore, +the required synchronization primitives needed will not function, +see issue 3770.""") diff --git a/thesisenv/lib/python3.6/site-packages/billiard/_win.py b/thesisenv/lib/python3.6/site-packages/billiard/_win.py new file mode 100644 index 0000000..dc0262e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/_win.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- +""" + billiard._win + ~~~~~~~~~~~~~ + + Windows utilities to terminate process groups. + +""" +from __future__ import absolute_import + +import os + +# psutil is painfully slow in win32. 
So to avoid adding big +# dependencies like pywin32 a ctypes based solution is preferred + +# Code based on the winappdbg project http://winappdbg.sourceforge.net/ +# (BSD License) +from ctypes import ( + byref, sizeof, windll, + Structure, WinError, POINTER, + c_size_t, c_char, c_void_p, +) +from ctypes.wintypes import DWORD, LONG + +ERROR_NO_MORE_FILES = 18 +INVALID_HANDLE_VALUE = c_void_p(-1).value + + +class PROCESSENTRY32(Structure): + _fields_ = [ + ('dwSize', DWORD), + ('cntUsage', DWORD), + ('th32ProcessID', DWORD), + ('th32DefaultHeapID', c_size_t), + ('th32ModuleID', DWORD), + ('cntThreads', DWORD), + ('th32ParentProcessID', DWORD), + ('pcPriClassBase', LONG), + ('dwFlags', DWORD), + ('szExeFile', c_char * 260), + ] +LPPROCESSENTRY32 = POINTER(PROCESSENTRY32) + + +def CreateToolhelp32Snapshot(dwFlags=2, th32ProcessID=0): + hSnapshot = windll.kernel32.CreateToolhelp32Snapshot(dwFlags, + th32ProcessID) + if hSnapshot == INVALID_HANDLE_VALUE: + raise WinError() + return hSnapshot + + +def Process32First(hSnapshot, pe=None): + return _Process32n(windll.kernel32.Process32First, hSnapshot, pe) + + +def Process32Next(hSnapshot, pe=None): + return _Process32n(windll.kernel32.Process32Next, hSnapshot, pe) + + +def _Process32n(fun, hSnapshot, pe=None): + if pe is None: + pe = PROCESSENTRY32() + pe.dwSize = sizeof(PROCESSENTRY32) + success = fun(hSnapshot, byref(pe)) + if not success: + if windll.kernel32.GetLastError() == ERROR_NO_MORE_FILES: + return + raise WinError() + return pe + + +def get_all_processes_pids(): + """Return a dictionary with all processes pids as keys and their + parents as value. Ignore processes with no parents. + """ + h = CreateToolhelp32Snapshot() + parents = {} + pe = Process32First(h) + while pe: + if pe.th32ParentProcessID: + parents[pe.th32ProcessID] = pe.th32ParentProcessID + pe = Process32Next(h, pe) + + return parents + + +def get_processtree_pids(pid, include_parent=True): + """Return a list with all the pids of a process tree""" + parents = get_all_processes_pids() + all_pids = list(parents.keys()) + pids = set([pid]) + while 1: + pids_new = pids.copy() + + for _pid in all_pids: + if parents[_pid] in pids: + pids_new.add(_pid) + + if pids_new == pids: + break + + pids = pids_new.copy() + + if not include_parent: + pids.remove(pid) + + return list(pids) + + +def kill_processtree(pid, signum): + """Kill a process and all its descendants""" + family_pids = get_processtree_pids(pid) + + for _pid in family_pids: + os.kill(_pid, signum) diff --git a/thesisenv/lib/python3.6/site-packages/billiard/common.py b/thesisenv/lib/python3.6/site-packages/billiard/common.py new file mode 100644 index 0000000..4b8ab82 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/common.py @@ -0,0 +1,134 @@ +# -*- coding: utf-8 -*- +""" +This module contains utilities added by billiard, to keep +"non-core" functionality out of ``.util``.""" +from __future__ import absolute_import + +import os +import signal +import sys + +import pickle as pypickle +try: + import cPickle as cpickle +except ImportError: # pragma: no cover + cpickle = None # noqa + +from .exceptions import RestartFreqExceeded +from .five import monotonic + +if sys.version_info < (2, 6): # pragma: no cover + # cPickle does not use absolute_imports + pickle = pypickle + pickle_load = pypickle.load + pickle_loads = pypickle.loads +else: + pickle = cpickle or pypickle + pickle_load = pickle.load + pickle_loads = pickle.loads + +# cPickle.loads does not support buffer() objects, +# but we can just create a 
StringIO and use load. +if sys.version_info[0] == 3: + from io import BytesIO +else: + try: + from cStringIO import StringIO as BytesIO # noqa + except ImportError: + from StringIO import StringIO as BytesIO # noqa + +EX_SOFTWARE = 70 + +TERMSIGS_DEFAULT = ( + 'SIGHUP', + 'SIGQUIT', + 'SIGTERM', + 'SIGUSR1', + 'SIGUSR2' +) + +TERMSIGS_FULL = ( + 'SIGHUP', + 'SIGQUIT', + 'SIGTRAP', + 'SIGABRT', + 'SIGEMT', + 'SIGSYS', + 'SIGPIPE', + 'SIGALRM', + 'SIGTERM', + 'SIGXCPU', + 'SIGXFSZ', + 'SIGVTALRM', + 'SIGPROF', + 'SIGUSR1', + 'SIGUSR2', +) + +#: set by signal handlers just before calling exit. +#: if this is true after the sighandler returns it means that something +#: went wrong while terminating the process, and :func:`os._exit` +#: must be called ASAP. +_should_have_exited = [False] + + +def pickle_loads(s, load=pickle_load): + # used to support buffer objects + return load(BytesIO(s)) + + +def maybe_setsignal(signum, handler): + try: + signal.signal(signum, handler) + except (OSError, AttributeError, ValueError, RuntimeError): + pass + + +def _shutdown_cleanup(signum, frame): + # we will exit here so if the signal is received a second time + # we can be sure that something is very wrong and we may be in + # a crashing loop. + if _should_have_exited[0]: + os._exit(EX_SOFTWARE) + maybe_setsignal(signum, signal.SIG_DFL) + _should_have_exited[0] = True + sys.exit(-(256 - signum)) + + +def reset_signals(handler=_shutdown_cleanup, full=False): + for sig in TERMSIGS_FULL if full else TERMSIGS_DEFAULT: + try: + signum = getattr(signal, sig) + except AttributeError: + pass + else: + current = signal.getsignal(signum) + if current is not None and current != signal.SIG_IGN: + maybe_setsignal(signum, handler) + + +class restart_state(object): + RestartFreqExceeded = RestartFreqExceeded + + def __init__(self, maxR, maxT): + self.maxR, self.maxT = maxR, maxT + self.R, self.T = 0, None + + def step(self, now=None): + now = monotonic() if now is None else now + R = self.R + if self.T and now - self.T >= self.maxT: + # maxT passed, reset counter and time passed. + self.T, self.R = now, 0 + elif self.maxR and self.R >= self.maxR: + # verify that R has a value as the result handler + # resets this when a job is accepted. 
If a job is accepted + # the startup probably went fine (startup restart burst + # protection) + if self.R: # pragma: no cover + self.R = 0 # reset in case someone catches the error + raise self.RestartFreqExceeded("%r in %rs" % (R, self.maxT)) + # first run sets T + if self.T is None: + self.T = now + self.R += 1 diff --git a/thesisenv/lib/python3.6/site-packages/billiard/compat.py b/thesisenv/lib/python3.6/site-packages/billiard/compat.py new file mode 100644 index 0000000..aac4b7c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/compat.py @@ -0,0 +1,107 @@ +from __future__ import absolute_import + +import errno +import os +import sys + +from .five import range + +if sys.platform == 'win32': + try: + import _winapi # noqa + except ImportError: # pragma: no cover + try: + from _billiard import win32 as _winapi # noqa + except (ImportError, AttributeError): + from _multiprocessing import win32 as _winapi # noqa +else: + _winapi = None # noqa + + +if sys.version_info > (2, 7, 5): + buf_t, is_new_buffer = memoryview, True # noqa +else: + buf_t, is_new_buffer = buffer, False # noqa + +if hasattr(os, 'write'): + __write__ = os.write + + if is_new_buffer: + + def send_offset(fd, buf, offset): + return __write__(fd, buf[offset:]) + + else: # Py<2.7.6 + + def send_offset(fd, buf, offset): # noqa + return __write__(fd, buf_t(buf, offset)) + +else: # non-posix platform + + def send_offset(fd, buf, offset): # noqa + raise NotImplementedError('send_offset') + + +if sys.version_info[0] == 3: + bytes = bytes +else: + _bytes = bytes + + # the 'bytes' alias in Python2 does not support an encoding argument. + + class bytes(_bytes): # noqa + + def __new__(cls, *args): + if len(args) > 1: + return _bytes(args[0]).encode(*args[1:]) + return _bytes(*args) + +try: + closerange = os.closerange +except AttributeError: + + def closerange(fd_low, fd_high): # noqa + for fd in reversed(range(fd_low, fd_high)): + try: + os.close(fd) + except OSError as exc: + if exc.errno != errno.EBADF: + raise + + +def get_errno(exc): + """:exc:`socket.error` and :exc:`IOError` first got + the ``.errno`` attribute in Py2.7""" + try: + return exc.errno + except AttributeError: + try: + # e.args = (errno, reason) + if isinstance(exc.args, tuple) and len(exc.args) == 2: + return exc.args[0] + except AttributeError: + pass + return 0 + + +if sys.platform == 'win32': + + def setblocking(handle, blocking): + raise NotImplementedError('setblocking not implemented on win32') + + def isblocking(handle): + raise NotImplementedError('isblocking not implemented on win32') + +else: + from os import O_NONBLOCK + from fcntl import fcntl, F_GETFL, F_SETFL + + def isblocking(handle): # noqa + return not (fcntl(handle, F_GETFL) & O_NONBLOCK) + + def setblocking(handle, blocking): # noqa + flags = fcntl(handle, F_GETFL, 0) + fcntl( + handle, F_SETFL, + flags & (~O_NONBLOCK) if blocking else flags | O_NONBLOCK, + ) diff --git a/thesisenv/lib/python3.6/site-packages/billiard/connection.py b/thesisenv/lib/python3.6/site-packages/billiard/connection.py new file mode 100644 index 0000000..bd2e36e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/connection.py @@ -0,0 +1,27 @@ +from __future__ import absolute_import + +import sys + +is_pypy = hasattr(sys, 'pypy_version_info') + +if sys.version_info[0] == 3: + from .py3 import connection +else: + from .py2 import connection # noqa + + +if is_pypy: + import _multiprocessing + from .compat import setblocking, send_offset + + class Connection(_multiprocessing.Connection): + + 
def send_offset(self, buf, offset): + return send_offset(self.fileno(), buf, offset) + + def setblocking(self, blocking): + setblocking(self.fileno(), blocking) + _multiprocessing.Connection = Connection + + +sys.modules[__name__] = connection diff --git a/thesisenv/lib/python3.6/site-packages/billiard/dummy/__init__.py b/thesisenv/lib/python3.6/site-packages/billiard/dummy/__init__.py new file mode 100644 index 0000000..e6e78a6 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/dummy/__init__.py @@ -0,0 +1,165 @@ +# +# Support for the API of the multiprocessing package using threads +# +# multiprocessing/dummy/__init__.py +# +# Copyright (c) 2006-2008, R Oudkerk +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of author nor the names of any contributors may be +# used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. 
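+#
+# A minimal usage sketch of the thread-backed API this module provides; the
+# ``shout`` function is illustrative, and ``Pool`` here returns a
+# ``billiard.pool.ThreadPool``, so no child processes are started:
+#
+#     from billiard import dummy
+#
+#     def shout(word):
+#         return word.upper()
+#
+#     pool = dummy.Pool(4)
+#     print(pool.map(shout, ['ham', 'eggs']))   # -> ['HAM', 'EGGS']
+#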
+# +from __future__ import absolute_import + +# +# Imports +# + +import threading +import sys +import weakref +import array + +from threading import Lock, RLock, Semaphore, BoundedSemaphore +from threading import Event + +from billiard.five import Queue + +from billiard.connection import Pipe + +__all__ = [ + 'Process', 'current_process', 'active_children', 'freeze_support', + 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', + 'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' +] + + +class DummyProcess(threading.Thread): + + def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): + threading.Thread.__init__(self, group, target, name, args, kwargs) + self._pid = None + self._children = weakref.WeakKeyDictionary() + self._start_called = False + self._parent = current_process() + + def start(self): + assert self._parent is current_process() + self._start_called = True + self._parent._children[self] = None + threading.Thread.start(self) + + @property + def exitcode(self): + if self._start_called and not self.is_alive(): + return 0 + else: + return None + + +try: + _Condition = threading._Condition +except AttributeError: # Py3 + _Condition = threading.Condition # noqa + + +class Condition(_Condition): + if sys.version_info[0] == 3: + notify_all = _Condition.notifyAll + else: + notify_all = _Condition.notifyAll.__func__ + + +Process = DummyProcess +current_process = threading.currentThread +current_process()._children = weakref.WeakKeyDictionary() + + +def active_children(): + children = current_process()._children + for p in list(children): + if not p.is_alive(): + children.pop(p, None) + return list(children) + + +def freeze_support(): + pass + + +class Namespace(object): + + def __init__(self, **kwds): + self.__dict__.update(kwds) + + def __repr__(self): + items = list(self.__dict__.items()) + temp = [] + for name, value in items: + if not name.startswith('_'): + temp.append('%s=%r' % (name, value)) + temp.sort() + return 'Namespace(%s)' % str.join(', ', temp) + + +dict = dict +list = list + + +def Array(typecode, sequence, lock=True): + return array.array(typecode, sequence) + + +class Value(object): + + def __init__(self, typecode, value, lock=True): + self._typecode = typecode + self._value = value + + def _get(self): + return self._value + + def _set(self, value): + self._value = value + value = property(_get, _set) + + def __repr__(self): + return '<%r(%r, %r)>' % (type(self).__name__, + self._typecode, self._value) + + +def Manager(): + return sys.modules[__name__] + + +def shutdown(): + pass + + +def Pool(processes=None, initializer=None, initargs=()): + from billiard.pool import ThreadPool + return ThreadPool(processes, initializer, initargs) + +JoinableQueue = Queue diff --git a/thesisenv/lib/python3.6/site-packages/billiard/dummy/connection.py b/thesisenv/lib/python3.6/site-packages/billiard/dummy/connection.py new file mode 100644 index 0000000..6bf6b9d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/dummy/connection.py @@ -0,0 +1,93 @@ +# +# Analogue of `multiprocessing.connection` which uses queues instead of sockets +# +# multiprocessing/dummy/connection.py +# +# Copyright (c) 2006-2008, R Oudkerk +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. 
Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of author nor the names of any contributors may be +# used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. +# +from __future__ import absolute_import + +from billiard.five import Queue + +__all__ = ['Client', 'Listener', 'Pipe'] + +families = [None] + + +class Listener(object): + + def __init__(self, address=None, family=None, backlog=1): + self._backlog_queue = Queue(backlog) + + def accept(self): + return Connection(*self._backlog_queue.get()) + + def close(self): + self._backlog_queue = None + + address = property(lambda self: self._backlog_queue) + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + + +def Client(address): + _in, _out = Queue(), Queue() + address.put((_out, _in)) + return Connection(_in, _out) + + +def Pipe(duplex=True): + a, b = Queue(), Queue() + return Connection(a, b), Connection(b, a) + + +class Connection(object): + + def __init__(self, _in, _out): + self._out = _out + self._in = _in + self.send = self.send_bytes = _out.put + self.recv = self.recv_bytes = _in.get + + def poll(self, timeout=0.0): + if self._in.qsize() > 0: + return True + if timeout <= 0.0: + return False + self._in.not_empty.acquire() + self._in.not_empty.wait(timeout) + self._in.not_empty.release() + return self._in.qsize() > 0 + + def close(self): + pass diff --git a/thesisenv/lib/python3.6/site-packages/billiard/einfo.py b/thesisenv/lib/python3.6/site-packages/billiard/einfo.py new file mode 100644 index 0000000..a091c15 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/einfo.py @@ -0,0 +1,134 @@ +from __future__ import absolute_import + +import sys +import traceback + +__all__ = ['ExceptionInfo', 'Traceback'] + +DEFAULT_MAX_FRAMES = sys.getrecursionlimit() // 8 + + +class _Code(object): + + def __init__(self, code): + self.co_filename = code.co_filename + self.co_name = code.co_name + self.co_argcount = code.co_argcount + self.co_cellvars = () + self.co_firstlineno = code.co_firstlineno + self.co_flags = code.co_flags + self.co_freevars = () + self.co_code = b'' + self.co_lnotab = b'' + self.co_names = code.co_names + self.co_nlocals = code.co_nlocals + self.co_stacksize = code.co_stacksize + self.co_varnames = () + + +class _Frame(object): + Code = _Code + + def __init__(self, frame): + self.f_builtins = {} + self.f_globals = { + "__file__": frame.f_globals.get("__file__", "__main__"), + "__name__": frame.f_globals.get("__name__"), + "__loader__": None, + } + self.f_locals = fl = {} + try: + 
fl["__traceback_hide__"] = frame.f_locals["__traceback_hide__"] + except KeyError: + pass + self.f_trace = None + self.f_exc_traceback = None + self.f_exc_type = None + self.f_exc_value = None + self.f_code = self.Code(frame.f_code) + self.f_lineno = frame.f_lineno + self.f_lasti = frame.f_lasti + # don't want to hit https://bugs.python.org/issue21967 + self.f_restricted = False + + +class _Object(object): + + def __init__(self, **kw): + [setattr(self, k, v) for k, v in kw.items()] + + +class _Truncated(object): + + def __init__(self): + self.tb_lineno = -1 + self.tb_frame = _Object( + f_globals={"__file__": "", + "__name__": "", + "__loader__": None}, + f_fileno=None, + f_code=_Object(co_filename="...", + co_name="[rest of traceback truncated]"), + ) + self.tb_next = None + self.tb_lasti = 0 + + +class Traceback(object): + Frame = _Frame + + def __init__(self, tb, max_frames=DEFAULT_MAX_FRAMES, depth=0): + self.tb_frame = self.Frame(tb.tb_frame) + self.tb_lineno = tb.tb_lineno + self.tb_lasti = tb.tb_lasti + self.tb_next = None + if tb.tb_next is not None: + if depth <= max_frames: + self.tb_next = Traceback(tb.tb_next, max_frames, depth + 1) + else: + self.tb_next = _Truncated() + + +class ExceptionInfo(object): + """Exception wrapping an exception and its traceback. + + :param exc_info: The exception info tuple as returned by + :func:`sys.exc_info`. + + """ + + #: Exception type. + type = None + + #: Exception instance. + exception = None + + #: Pickleable traceback instance for use with :mod:`traceback` + tb = None + + #: String representation of the traceback. + traceback = None + + #: Set to true if this is an internal error. + internal = False + + def __init__(self, exc_info=None, internal=False): + self.type, self.exception, tb = exc_info or sys.exc_info() + try: + self.tb = Traceback(tb) + self.traceback = ''.join( + traceback.format_exception(self.type, self.exception, tb), + ) + self.internal = internal + finally: + del(tb) + + def __str__(self): + return self.traceback + + def __repr__(self): + return "" % (self.exception, ) + + @property + def exc_info(self): + return self.type, self.exception, self.tb diff --git a/thesisenv/lib/python3.6/site-packages/billiard/exceptions.py b/thesisenv/lib/python3.6/site-packages/billiard/exceptions.py new file mode 100644 index 0000000..df56083 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/exceptions.py @@ -0,0 +1,54 @@ +from __future__ import absolute_import + +try: + from multiprocessing import ( + ProcessError, + BufferTooShort, + TimeoutError, + AuthenticationError, + ) +except ImportError: + class ProcessError(Exception): # noqa + pass + + class BufferTooShort(Exception): # noqa + pass + + class TimeoutError(Exception): # noqa + pass + + class AuthenticationError(Exception): # noqa + pass + + +class TimeLimitExceeded(Exception): + """The time limit has been exceeded and the job has been terminated.""" + + def __str__(self): + return "TimeLimitExceeded%s" % (self.args, ) + + +class SoftTimeLimitExceeded(Exception): + """The soft time limit has been exceeded. 
This exception is raised + to give the task a chance to clean up.""" + + def __str__(self): + return "SoftTimeLimitExceeded%s" % (self.args, ) + + +class WorkerLostError(Exception): + """The worker processing a job has exited prematurely.""" + + +class Terminated(Exception): + """The worker processing a job has been terminated by user request.""" + + +class RestartFreqExceeded(Exception): + """Restarts too fast.""" + + +class CoroStop(Exception): + """Coroutine exit, as opposed to StopIteration which may + mean it should be restarted.""" + pass diff --git a/thesisenv/lib/python3.6/site-packages/billiard/five.py b/thesisenv/lib/python3.6/site-packages/billiard/five.py new file mode 100644 index 0000000..8630e2e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/five.py @@ -0,0 +1,192 @@ +# -*- coding: utf-8 -*- +""" + celery.five + ~~~~~~~~~~~ + + Compatibility implementations of features + only available in newer Python versions. + + +""" +from __future__ import absolute_import + +# ############# py3k ######################################################### +import sys +PY3 = sys.version_info[0] == 3 + +try: + reload = reload # noqa +except NameError: # pragma: no cover + from imp import reload # noqa + +try: + from UserList import UserList # noqa +except ImportError: # pragma: no cover + from collections import UserList # noqa + +try: + from UserDict import UserDict # noqa +except ImportError: # pragma: no cover + from collections import UserDict # noqa + +# ############# time.monotonic ############################################### + +if sys.version_info < (3, 3): + + import platform + SYSTEM = platform.system() + + try: + import ctypes + except ImportError: # pragma: no cover + ctypes = None # noqa + + if SYSTEM == 'Darwin' and ctypes is not None: + from ctypes.util import find_library + libSystem = ctypes.CDLL(find_library('libSystem.dylib')) + CoreServices = ctypes.CDLL(find_library('CoreServices'), + use_errno=True) + mach_absolute_time = libSystem.mach_absolute_time + mach_absolute_time.restype = ctypes.c_uint64 + absolute_to_nanoseconds = CoreServices.AbsoluteToNanoseconds + absolute_to_nanoseconds.restype = ctypes.c_uint64 + absolute_to_nanoseconds.argtypes = [ctypes.c_uint64] + + def _monotonic(): + return absolute_to_nanoseconds(mach_absolute_time()) * 1e-9 + + elif SYSTEM == 'Linux' and ctypes is not None: + # from stackoverflow: + # questions/1205722/how-do-i-get-monotonic-time-durations-in-python + import ctypes + import os + + CLOCK_MONOTONIC = 1 # see + + class timespec(ctypes.Structure): + _fields_ = [ + ('tv_sec', ctypes.c_long), + ('tv_nsec', ctypes.c_long), + ] + + librt = ctypes.CDLL('librt.so.1', use_errno=True) + clock_gettime = librt.clock_gettime + clock_gettime.argtypes = [ + ctypes.c_int, ctypes.POINTER(timespec), + ] + + def _monotonic(): # noqa + t = timespec() + if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0: + errno_ = ctypes.get_errno() + raise OSError(errno_, os.strerror(errno_)) + return t.tv_sec + t.tv_nsec * 1e-9 + else: + from time import time as _monotonic + +try: + from time import monotonic +except ImportError: + monotonic = _monotonic # noqa + +if PY3: + import builtins + + from queue import Queue, Empty, Full + from itertools import zip_longest + from io import StringIO, BytesIO + + map = map + string = str + string_t = str + long_t = int + text_t = str + range = range + int_types = (int, ) + + open_fqdn = 'builtins.open' + + def items(d): + return d.items() + + def keys(d): + return d.keys() + + def values(d): + return 
d.values() + + def nextfun(it): + return it.__next__ + + exec_ = getattr(builtins, 'exec') + + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + + class WhateverIO(StringIO): + + def write(self, data): + if isinstance(data, bytes): + data = data.encode() + StringIO.write(self, data) + +else: + import __builtin__ as builtins # noqa + from Queue import Queue, Empty, Full # noqa + from itertools import imap as map, izip_longest as zip_longest # noqa + from StringIO import StringIO # noqa + string = unicode # noqa + string_t = basestring # noqa + text_t = unicode + long_t = long # noqa + range = xrange + int_types = (int, long) + + open_fqdn = '__builtin__.open' + + def items(d): # noqa + return d.iteritems() + + def keys(d): # noqa + return d.iterkeys() + + def values(d): # noqa + return d.itervalues() + + def nextfun(it): # noqa + return it.next + + def exec_(code, globs=None, locs=None): + """Execute code in a namespace.""" + if globs is None: + frame = sys._getframe(1) + globs = frame.f_globals + if locs is None: + locs = frame.f_locals + del frame + elif locs is None: + locs = globs + exec("""exec code in globs, locs""") + + exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""") + + BytesIO = WhateverIO = StringIO # noqa + + +def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])): + """Class decorator to set metaclass. + + Works with both Python 2 and Python 3 and it does not add + an extra class in the lookup order like ``six.with_metaclass`` does + (that is -- it copies the original class instead of using inheritance). + + """ + + def _clone_with_metaclass(Class): + attrs = dict((key, value) for key, value in items(vars(Class)) + if key not in skip_attrs) + return Type(Class.__name__, Class.__bases__, attrs) + + return _clone_with_metaclass diff --git a/thesisenv/lib/python3.6/site-packages/billiard/forking.py b/thesisenv/lib/python3.6/site-packages/billiard/forking.py new file mode 100644 index 0000000..d67ef8a --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/forking.py @@ -0,0 +1,580 @@ +# +# Module for starting a process object using os.fork() or CreateProcess() +# +# multiprocessing/forking.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +from __future__ import absolute_import + +import os +import sys +import signal +import warnings + +from pickle import load, HIGHEST_PROTOCOL +from billiard import util +from billiard import process +from billiard.five import int_types +from .reduction import dump +from .compat import _winapi as win32 + +__all__ = ['Popen', 'assert_spawning', 'exit', + 'duplicate', 'close'] + +try: + WindowsError = WindowsError # noqa +except NameError: + class WindowsError(Exception): # noqa + pass + +W_OLD_DJANGO_LAYOUT = """\ +Will add directory %r to path! This is necessary to accommodate \ +pre-Django 1.4 layouts using setup_environ. +You can skip this warning by adding a DJANGO_SETTINGS_MODULE=settings \ +environment variable. +""" + +# +# Choose whether to do a fork or spawn (fork+exec) on Unix. +# This affects how some shared resources should be created. 
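+#
+# A short sketch of how forking is toggled from application code, using the
+# helpers defined in billiard/__init__.py (shown earlier in this patch);
+# disabling forking needs the C extension, otherwise a RuntimeWarning is
+# emitted and the setting is left unchanged:
+#
+#     import billiard
+#
+#     billiard.forking_enable(False)   # fork+exec children; also sets
+#                                      # MULTIPROCESSING_FORKING_DISABLE=1
+#     assert not billiard.forking_is_enabled()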
+# + +_forking_is_enabled = sys.platform != 'win32' + +# +# Check that the current thread is spawning a child process +# + + +def assert_spawning(self): + if not Popen.thread_is_spawning(): + raise RuntimeError( + '%s objects should only be shared between processes' + ' through inheritance' % type(self).__name__ + ) + + +# +# Unix +# + +if sys.platform != 'win32': + try: + import thread + except ImportError: + import _thread as thread # noqa + import select + + WINEXE = False + WINSERVICE = False + + exit = os._exit + duplicate = os.dup + close = os.close + _select = util._eintr_retry(select.select) + + # + # We define a Popen class similar to the one from subprocess, but + # whose constructor takes a process object as its argument. + # + + class Popen(object): + + _tls = thread._local() + + def __init__(self, process_obj): + # register reducers + from billiard import connection # noqa + _Django_old_layout_hack__save() + sys.stdout.flush() + sys.stderr.flush() + self.returncode = None + r, w = os.pipe() + self.sentinel = r + + if _forking_is_enabled: + self.pid = os.fork() + if self.pid == 0: + os.close(r) + if 'random' in sys.modules: + import random + random.seed() + code = process_obj._bootstrap() + os._exit(code) + else: + from_parent_fd, to_child_fd = os.pipe() + cmd = get_command_line() + [str(from_parent_fd)] + + self.pid = os.fork() + if self.pid == 0: + os.close(r) + os.close(to_child_fd) + os.execv(sys.executable, cmd) + + # send information to child + prep_data = get_preparation_data(process_obj._name) + os.close(from_parent_fd) + to_child = os.fdopen(to_child_fd, 'wb') + Popen._tls.process_handle = self.pid + try: + dump(prep_data, to_child, HIGHEST_PROTOCOL) + dump(process_obj, to_child, HIGHEST_PROTOCOL) + finally: + del(Popen._tls.process_handle) + to_child.close() + + # `w` will be closed when the child exits, at which point `r` + # will become ready for reading (using e.g. select()). + os.close(w) + util.Finalize(self, os.close, (self.sentinel,)) + + def poll(self, flag=os.WNOHANG): + if self.returncode is None: + try: + pid, sts = os.waitpid(self.pid, flag) + except os.error: + # Child process not yet created. See #1731717 + # e.errno == errno.ECHILD == 10 + return None + if pid == self.pid: + if os.WIFSIGNALED(sts): + self.returncode = -os.WTERMSIG(sts) + else: + assert os.WIFEXITED(sts) + self.returncode = os.WEXITSTATUS(sts) + return self.returncode + + def wait(self, timeout=None): + if self.returncode is None: + if timeout is not None: + r = _select([self.sentinel], [], [], timeout)[0] + if not r: + return None + # This shouldn't block if select() returned successfully. 
+ return self.poll(os.WNOHANG if timeout == 0.0 else 0) + return self.returncode + + def terminate(self): + if self.returncode is None: + try: + os.kill(self.pid, signal.SIGTERM) + except OSError: + if self.wait(timeout=0.1) is None: + raise + + @staticmethod + def thread_is_spawning(): + if _forking_is_enabled: + return False + else: + return getattr(Popen._tls, 'process_handle', None) is not None + + @staticmethod + def duplicate_for_child(handle): + return handle + +# +# Windows +# + +else: + try: + import thread + except ImportError: + import _thread as thread # noqa + import msvcrt + try: + import _subprocess + except ImportError: + import _winapi as _subprocess # noqa + + # + # + # + + TERMINATE = 0x10000 + WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) + WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") + + exit = win32.ExitProcess + close = win32.CloseHandle + + # + # + # + + def duplicate(handle, target_process=None, inheritable=False): + if target_process is None: + target_process = _subprocess.GetCurrentProcess() + h = _subprocess.DuplicateHandle( + _subprocess.GetCurrentProcess(), handle, target_process, + 0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS + ) + if sys.version_info[0] < 3 or ( + sys.version_info[0] == 3 and sys.version_info[1] < 3): + h = h.Detach() + return h + + # + # We define a Popen class similar to the one from subprocess, but + # whose constructor takes a process object as its argument. + # + + class Popen(object): + ''' + Start a subprocess to run the code of a process object + ''' + _tls = thread._local() + + def __init__(self, process_obj): + _Django_old_layout_hack__save() + # create pipe for communication with child + rfd, wfd = os.pipe() + + # get handle for read end of the pipe and make it inheritable + rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True) + os.close(rfd) + + # start process + cmd = get_command_line() + [rhandle] + cmd = ' '.join('"%s"' % x for x in cmd) + hp, ht, pid, tid = _subprocess.CreateProcess( + _python_exe, cmd, None, None, 1, 0, None, None, None + ) + close(ht) if isinstance(ht, int_types) else ht.Close() + (close(rhandle) if isinstance(rhandle, int_types) + else rhandle.Close()) + + # set attributes of self + self.pid = pid + self.returncode = None + self._handle = hp + self.sentinel = int(hp) + + # send information to child + prep_data = get_preparation_data(process_obj._name) + to_child = os.fdopen(wfd, 'wb') + Popen._tls.process_handle = int(hp) + try: + dump(prep_data, to_child, HIGHEST_PROTOCOL) + dump(process_obj, to_child, HIGHEST_PROTOCOL) + finally: + del Popen._tls.process_handle + to_child.close() + + @staticmethod + def thread_is_spawning(): + return getattr(Popen._tls, 'process_handle', None) is not None + + @staticmethod + def duplicate_for_child(handle): + return duplicate(handle, Popen._tls.process_handle) + + def wait(self, timeout=None): + if self.returncode is None: + if timeout is None: + msecs = _subprocess.INFINITE + else: + msecs = max(0, int(timeout * 1000 + 0.5)) + + res = _subprocess.WaitForSingleObject(int(self._handle), msecs) + if res == _subprocess.WAIT_OBJECT_0: + code = _subprocess.GetExitCodeProcess(self._handle) + if code == TERMINATE: + code = -signal.SIGTERM + self.returncode = code + + return self.returncode + + def poll(self): + return self.wait(timeout=0) + + def terminate(self): + if self.returncode is None: + try: + _subprocess.TerminateProcess(int(self._handle), TERMINATE) + except WindowsError: + if self.wait(timeout=0.1) is None: 
+ raise + + # + # + # + +if WINSERVICE: + _python_exe = os.path.join(sys.exec_prefix, 'python.exe') +else: + _python_exe = sys.executable + + +def set_executable(exe): + global _python_exe + _python_exe = exe + + +def is_forking(argv): + ''' + Return whether commandline indicates we are forking + ''' + if len(argv) >= 2 and argv[1] == '--billiard-fork': + assert len(argv) == 3 + os.environ["FORKED_BY_MULTIPROCESSING"] = "1" + return True + else: + return False + + +def freeze_support(): + ''' + Run code for process object if this in not the main process + ''' + if is_forking(sys.argv): + main() + sys.exit() + + +def get_command_line(): + ''' + Returns prefix of command line used for spawning a child process + ''' + if process.current_process()._identity == () and is_forking(sys.argv): + raise RuntimeError(''' + Attempt to start a new process before the current process + has finished its bootstrapping phase. + + This probably means that have forgotten to use the proper + idiom in the main module: + + if __name__ == '__main__': + freeze_support() + ... + + The "freeze_support()" line can be omitted if the program + is not going to be frozen to produce a Windows executable.''') + + if getattr(sys, 'frozen', False): + return [sys.executable, '--billiard-fork'] + else: + prog = 'from billiard.forking import main; main()' + return [_python_exe, '-c', prog, '--billiard-fork'] + + +def _Django_old_layout_hack__save(): + if 'DJANGO_PROJECT_DIR' not in os.environ: + try: + settings_name = os.environ['DJANGO_SETTINGS_MODULE'] + except KeyError: + return # not using Django. + + conf_settings = sys.modules.get('django.conf.settings') + configured = conf_settings and conf_settings.configured + try: + project_name, _ = settings_name.split('.', 1) + except ValueError: + return # not modified by setup_environ + + project = __import__(project_name) + try: + project_dir = os.path.normpath(_module_parent_dir(project)) + except AttributeError: + return # dynamically generated module (no __file__) + if configured: + warnings.warn(UserWarning( + W_OLD_DJANGO_LAYOUT % os.path.realpath(project_dir) + )) + os.environ['DJANGO_PROJECT_DIR'] = project_dir + + +def _Django_old_layout_hack__load(): + try: + sys.path.append(os.environ['DJANGO_PROJECT_DIR']) + except KeyError: + pass + + +def _module_parent_dir(mod): + dir, filename = os.path.split(_module_dir(mod)) + if dir == os.curdir or not dir: + dir = os.getcwd() + return dir + + +def _module_dir(mod): + if '__init__.py' in mod.__file__: + return os.path.dirname(mod.__file__) + return mod.__file__ + + +def main(): + ''' + Run code specifed by data received over pipe + ''' + global _forking_is_enabled + _Django_old_layout_hack__load() + + assert is_forking(sys.argv) + _forking_is_enabled = False + + handle = int(sys.argv[-1]) + if sys.platform == 'win32': + fd = msvcrt.open_osfhandle(handle, os.O_RDONLY) + else: + fd = handle + from_parent = os.fdopen(fd, 'rb') + + process.current_process()._inheriting = True + preparation_data = load(from_parent) + prepare(preparation_data) + # Huge hack to make logging before Process.run work. 
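+ # (Descriptive note: the block below restores a minimal logging setup in
+ # the child process. It records MP_MAIN_FILE from __main__.__file__ and,
+ # when the _MP_FORK_LOGLEVEL_ / _MP_FORK_LOGFILE_ / _MP_FORK_LOGFORMAT_
+ # environment variables are set, attaches a stream or file handler so
+ # that log calls made before Process.run() still produce output.)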
+ try: + os.environ["MP_MAIN_FILE"] = sys.modules["__main__"].__file__ + except KeyError: + pass + except AttributeError: + pass + loglevel = os.environ.get("_MP_FORK_LOGLEVEL_") + logfile = os.environ.get("_MP_FORK_LOGFILE_") or None + format = os.environ.get("_MP_FORK_LOGFORMAT_") + if loglevel: + from billiard import util + import logging + logger = util.get_logger() + logger.setLevel(int(loglevel)) + if not logger.handlers: + logger._rudimentary_setup = True + logfile = logfile or sys.__stderr__ + if hasattr(logfile, "write"): + handler = logging.StreamHandler(logfile) + else: + handler = logging.FileHandler(logfile) + formatter = logging.Formatter( + format or util.DEFAULT_LOGGING_FORMAT, + ) + handler.setFormatter(formatter) + logger.addHandler(handler) + + self = load(from_parent) + process.current_process()._inheriting = False + + from_parent.close() + + exitcode = self._bootstrap() + exit(exitcode) + + +def get_preparation_data(name): + ''' + Return info about parent needed by child to unpickle process object + ''' + from billiard.util import _logger, _log_to_stderr + + d = dict( + name=name, + sys_path=sys.path, + sys_argv=sys.argv, + log_to_stderr=_log_to_stderr, + orig_dir=process.ORIGINAL_DIR, + authkey=process.current_process().authkey, + ) + + if _logger is not None: + d['log_level'] = _logger.getEffectiveLevel() + + if not WINEXE and not WINSERVICE: + main_path = getattr(sys.modules['__main__'], '__file__', None) + if not main_path and sys.argv[0] not in ('', '-c'): + main_path = sys.argv[0] + if main_path is not None: + if (not os.path.isabs(main_path) and + process.ORIGINAL_DIR is not None): + main_path = os.path.join(process.ORIGINAL_DIR, main_path) + d['main_path'] = os.path.normpath(main_path) + + return d + +# +# Prepare current process +# + +old_main_modules = [] + + +def prepare(data): + ''' + Try to get current process ready to unpickle process object + ''' + old_main_modules.append(sys.modules['__main__']) + + if 'name' in data: + process.current_process().name = data['name'] + + if 'authkey' in data: + process.current_process()._authkey = data['authkey'] + + if 'log_to_stderr' in data and data['log_to_stderr']: + util.log_to_stderr() + + if 'log_level' in data: + util.get_logger().setLevel(data['log_level']) + + if 'sys_path' in data: + sys.path = data['sys_path'] + + if 'sys_argv' in data: + sys.argv = data['sys_argv'] + + if 'dir' in data: + os.chdir(data['dir']) + + if 'orig_dir' in data: + process.ORIGINAL_DIR = data['orig_dir'] + + if 'main_path' in data: + main_path = data['main_path'] + main_name = os.path.splitext(os.path.basename(main_path))[0] + if main_name == '__init__': + main_name = os.path.basename(os.path.dirname(main_path)) + + if main_name == '__main__': + main_module = sys.modules['__main__'] + main_module.__file__ = main_path + elif main_name != 'ipython': + # Main modules not actually called __main__.py may + # contain additional code that should still be executed + import imp + + if main_path is None: + dirs = None + elif os.path.basename(main_path).startswith('__init__.py'): + dirs = [os.path.dirname(os.path.dirname(main_path))] + else: + dirs = [os.path.dirname(main_path)] + + assert main_name not in sys.modules, main_name + file, path_name, etc = imp.find_module(main_name, dirs) + try: + # We would like to do "imp.load_module('__main__', ...)" + # here. However, that would cause 'if __name__ == + # "__main__"' clauses to be executed. 
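+ # (Descriptive note: loading the module under the temporary name
+ # '__parents_main__' avoids triggering those guards; it is re-registered
+ # as sys.modules['__main__'] once the import has finished, see below.)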
+ main_module = imp.load_module( + '__parents_main__', file, path_name, etc + ) + finally: + if file: + file.close() + + sys.modules['__main__'] = main_module + main_module.__name__ = '__main__' + + # Try to make the potentially picklable objects in + # sys.modules['__main__'] realize they are in the main + # module -- somewhat ugly. + for obj in list(main_module.__dict__.values()): + try: + if obj.__module__ == '__parents_main__': + obj.__module__ = '__main__' + except Exception: + pass diff --git a/thesisenv/lib/python3.6/site-packages/billiard/heap.py b/thesisenv/lib/python3.6/site-packages/billiard/heap.py new file mode 100644 index 0000000..027a050 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/heap.py @@ -0,0 +1,255 @@ +# +# Module which supports allocation of memory from an mmap +# +# multiprocessing/heap.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# +from __future__ import absolute_import + +import bisect +import mmap +import os +import sys +import threading +import itertools + +from ._ext import _billiard, win32 +from .util import Finalize, info, get_temp_dir +from .forking import assert_spawning +from .reduction import ForkingPickler + +__all__ = ['BufferWrapper'] + +try: + maxsize = sys.maxsize +except AttributeError: + maxsize = sys.maxint + +# +# Inheirtable class which wraps an mmap, and from which blocks can be allocated +# + +if sys.platform == 'win32': + + class Arena(object): + + _counter = itertools.count() + + def __init__(self, size): + self.size = size + self.name = 'pym-%d-%d' % (os.getpid(), next(Arena._counter)) + self.buffer = mmap.mmap(-1, self.size, tagname=self.name) + assert win32.GetLastError() == 0, 'tagname already in use' + self._state = (self.size, self.name) + + def __getstate__(self): + assert_spawning(self) + return self._state + + def __setstate__(self, state): + self.size, self.name = self._state = state + self.buffer = mmap.mmap(-1, self.size, tagname=self.name) + assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS + +else: + + class Arena(object): + + _counter = itertools.count() + + def __init__(self, size, fileno=-1): + from .forking import _forking_is_enabled + self.size = size + self.fileno = fileno + if fileno == -1 and not _forking_is_enabled: + name = os.path.join( + get_temp_dir(), + 'pym-%d-%d' % (os.getpid(), next(self._counter))) + self.fileno = os.open( + name, os.O_RDWR | os.O_CREAT | os.O_EXCL, 0o600) + os.unlink(name) + os.ftruncate(self.fileno, size) + self.buffer = mmap.mmap(self.fileno, self.size) + + def reduce_arena(a): + if a.fileno == -1: + raise ValueError('Arena is unpicklable because' + 'forking was enabled when it was created') + return Arena, (a.size, a.fileno) + + ForkingPickler.register(Arena, reduce_arena) + +# +# Class allowing allocation of chunks of memory from arenas +# + + +class Heap(object): + + _alignment = 8 + + def __init__(self, size=mmap.PAGESIZE): + self._lastpid = os.getpid() + self._lock = threading.Lock() + self._size = size + self._lengths = [] + self._len_to_seq = {} + self._start_to_block = {} + self._stop_to_block = {} + self._allocated_blocks = set() + self._arenas = [] + # list of pending blocks to free - see free() comment below + self._pending_free_blocks = [] + + @staticmethod + def _roundup(n, alignment): + # alignment must be a power of 2 + mask = alignment - 1 + return (n + mask) & ~mask + + def _malloc(self, size): + # returns a large enough block -- it might be much larger + i = bisect.bisect_left(self._lengths, 
size) + if i == len(self._lengths): + length = self._roundup(max(self._size, size), mmap.PAGESIZE) + self._size *= 2 + info('allocating a new mmap of length %d', length) + arena = Arena(length) + self._arenas.append(arena) + return (arena, 0, length) + else: + length = self._lengths[i] + seq = self._len_to_seq[length] + block = seq.pop() + if not seq: + del self._len_to_seq[length], self._lengths[i] + + (arena, start, stop) = block + del self._start_to_block[(arena, start)] + del self._stop_to_block[(arena, stop)] + return block + + def _free(self, block): + # free location and try to merge with neighbours + (arena, start, stop) = block + + try: + prev_block = self._stop_to_block[(arena, start)] + except KeyError: + pass + else: + start, _ = self._absorb(prev_block) + + try: + next_block = self._start_to_block[(arena, stop)] + except KeyError: + pass + else: + _, stop = self._absorb(next_block) + + block = (arena, start, stop) + length = stop - start + + try: + self._len_to_seq[length].append(block) + except KeyError: + self._len_to_seq[length] = [block] + bisect.insort(self._lengths, length) + + self._start_to_block[(arena, start)] = block + self._stop_to_block[(arena, stop)] = block + + def _absorb(self, block): + # deregister this block so it can be merged with a neighbour + (arena, start, stop) = block + del self._start_to_block[(arena, start)] + del self._stop_to_block[(arena, stop)] + + length = stop - start + seq = self._len_to_seq[length] + seq.remove(block) + if not seq: + del self._len_to_seq[length] + self._lengths.remove(length) + + return start, stop + + def _free_pending_blocks(self): + # Free all the blocks in the pending list - called with the lock held + while 1: + try: + block = self._pending_free_blocks.pop() + except IndexError: + break + self._allocated_blocks.remove(block) + self._free(block) + + def free(self, block): + # free a block returned by malloc() + # Since free() can be called asynchronously by the GC, it could happen + # that it's called while self._lock is held: in that case, + # self._lock.acquire() would deadlock (issue #12352). To avoid that, a + # trylock is used instead, and if the lock can't be acquired + # immediately, the block is added to a list of blocks to be freed + # synchronously sometimes later from malloc() or free(), by calling + # _free_pending_blocks() (appending and retrieving from a list is not + # strictly thread-safe but under cPython it's atomic thanks + # to the GIL). 
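+ # (Descriptive note: blocks deferred here are actually released by
+ # _free_pending_blocks(), which both free() and malloc() call while
+ # holding the lock.)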
+ assert os.getpid() == self._lastpid + if not self._lock.acquire(False): + # can't aquire the lock right now, add the block to the list of + # pending blocks to free + self._pending_free_blocks.append(block) + else: + # we hold the lock + try: + self._free_pending_blocks() + self._allocated_blocks.remove(block) + self._free(block) + finally: + self._lock.release() + + def malloc(self, size): + # return a block of right size (possibly rounded up) + assert 0 <= size < maxsize + if os.getpid() != self._lastpid: + self.__init__() # reinitialize after fork + self._lock.acquire() + self._free_pending_blocks() + try: + size = self._roundup(max(size, 1), self._alignment) + (arena, start, stop) = self._malloc(size) + new_stop = start + size + if new_stop < stop: + self._free((arena, new_stop, stop)) + block = (arena, start, new_stop) + self._allocated_blocks.add(block) + return block + finally: + self._lock.release() + +# +# Class representing a chunk of an mmap -- can be inherited +# + + +class BufferWrapper(object): + + _heap = Heap() + + def __init__(self, size): + assert 0 <= size < maxsize + block = BufferWrapper._heap.malloc(size) + self._state = (block, size) + Finalize(self, BufferWrapper._heap.free, args=(block,)) + + def get_address(self): + (arena, start, stop), size = self._state + address, length = _billiard.address_of_buffer(arena.buffer) + assert size <= length + return address + start + + def get_size(self): + return self._state[1] diff --git a/thesisenv/lib/python3.6/site-packages/billiard/managers.py b/thesisenv/lib/python3.6/site-packages/billiard/managers.py new file mode 100644 index 0000000..f08ba7f --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/managers.py @@ -0,0 +1,1169 @@ +# +# Module providing the `SyncManager` class for dealing +# with shared objects +# +# multiprocessing/managers.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# +from __future__ import absolute_import + +# +# Imports +# + +import sys +import threading +import array + +from traceback import format_exc + +from . 
import Process, current_process, active_children, Pool, util, connection +from .five import Queue, items, monotonic +from .process import AuthenticationString +from .forking import exit, Popen +from .reduction import ForkingPickler +from .util import Finalize, error, info + +__all__ = ['BaseManager', 'SyncManager', 'BaseProxy', 'Token'] + +# +# Register some things for pickling +# + + +def reduce_array(a): + return array.array, (a.typecode, a.tostring()) +ForkingPickler.register(array.array, reduce_array) + +view_types = [type(getattr({}, name)()) + for name in ('items', 'keys', 'values')] +if view_types[0] is not list: # only needed in Py3.0 + + def rebuild_as_list(obj): + return list, (list(obj), ) + for view_type in view_types: + ForkingPickler.register(view_type, rebuild_as_list) + try: + import copyreg + except ImportError: + pass + else: + copyreg.pickle(view_type, rebuild_as_list) + +# +# Type for identifying shared objects +# + + +class Token(object): + ''' + Type to uniquely indentify a shared object + ''' + __slots__ = ('typeid', 'address', 'id') + + def __init__(self, typeid, address, id): + (self.typeid, self.address, self.id) = (typeid, address, id) + + def __getstate__(self): + return (self.typeid, self.address, self.id) + + def __setstate__(self, state): + (self.typeid, self.address, self.id) = state + + def __repr__(self): + return 'Token(typeid=%r, address=%r, id=%r)' % \ + (self.typeid, self.address, self.id) + +# +# Function for communication with a manager's server process +# + + +def dispatch(c, id, methodname, args=(), kwds={}): + ''' + Send a message to manager using connection `c` and return response + ''' + c.send((id, methodname, args, kwds)) + kind, result = c.recv() + if kind == '#RETURN': + return result + raise convert_to_error(kind, result) + + +def convert_to_error(kind, result): + if kind == '#ERROR': + return result + elif kind == '#TRACEBACK': + assert type(result) is str + return RemoteError(result) + elif kind == '#UNSERIALIZABLE': + assert type(result) is str + return RemoteError('Unserializable message: %s\n' % result) + else: + return ValueError('Unrecognized message type') + + +class RemoteError(Exception): + + def __str__(self): + return ('\n' + '-' * 75 + '\n' + str(self.args[0]) + '-' * 75) + +# +# Functions for finding the method names of an object +# + + +def all_methods(obj): + ''' + Return a list of names of methods of `obj` + ''' + temp = [] + for name in dir(obj): + func = getattr(obj, name) + if callable(func): + temp.append(name) + return temp + + +def public_methods(obj): + ''' + Return a list of names of methods of `obj` which do not start with '_' + ''' + return [name for name in all_methods(obj) if name[0] != '_'] + +# +# Server which is run in a process controlled by a manager +# + + +class Server(object): + ''' + Server class which runs in a process controlled by a manager object + ''' + public = ['shutdown', 'create', 'accept_connection', 'get_methods', + 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] + + def __init__(self, registry, address, authkey, serializer): + assert isinstance(authkey, bytes) + self.registry = registry + self.authkey = AuthenticationString(authkey) + Listener, Client = listener_client[serializer] + + # do authentication later + self.listener = Listener(address=address, backlog=16) + self.address = self.listener.address + + self.id_to_obj = {'0': (None, ())} + self.id_to_refcount = {} + self.mutex = threading.RLock() + self.stop = 0 + + def serve_forever(self): + ''' + Run the server forever 
+ ''' + current_process()._manager_server = self + try: + try: + while 1: + try: + c = self.listener.accept() + except (OSError, IOError): + continue + t = threading.Thread(target=self.handle_request, args=(c,)) + t.daemon = True + t.start() + except (KeyboardInterrupt, SystemExit): + pass + finally: + self.stop = 999 + self.listener.close() + + def handle_request(self, c): + ''' + Handle a new connection + ''' + funcname = result = request = None + try: + connection.deliver_challenge(c, self.authkey) + connection.answer_challenge(c, self.authkey) + request = c.recv() + ignore, funcname, args, kwds = request + assert funcname in self.public, '%r unrecognized' % funcname + func = getattr(self, funcname) + except Exception: + msg = ('#TRACEBACK', format_exc()) + else: + try: + result = func(c, *args, **kwds) + except Exception: + msg = ('#TRACEBACK', format_exc()) + else: + msg = ('#RETURN', result) + try: + c.send(msg) + except Exception as exc: + try: + c.send(('#TRACEBACK', format_exc())) + except Exception: + pass + info('Failure to send message: %r', msg) + info(' ... request was %r', request) + info(' ... exception was %r', exc) + + c.close() + + def serve_client(self, conn): + ''' + Handle requests from the proxies in a particular process/thread + ''' + util.debug('starting server thread to service %r', + threading.currentThread().name) + + recv = conn.recv + send = conn.send + id_to_obj = self.id_to_obj + + while not self.stop: + + try: + methodname = obj = None + request = recv() + ident, methodname, args, kwds = request + obj, exposed, gettypeid = id_to_obj[ident] + + if methodname not in exposed: + raise AttributeError( + 'method %r of %r object is not in exposed=%r' % ( + methodname, type(obj), exposed) + ) + + function = getattr(obj, methodname) + + try: + res = function(*args, **kwds) + except Exception as exc: + msg = ('#ERROR', exc) + else: + typeid = gettypeid and gettypeid.get(methodname, None) + if typeid: + rident, rexposed = self.create(conn, typeid, res) + token = Token(typeid, self.address, rident) + msg = ('#PROXY', (rexposed, token)) + else: + msg = ('#RETURN', res) + + except AttributeError: + if methodname is None: + msg = ('#TRACEBACK', format_exc()) + else: + try: + fallback_func = self.fallback_mapping[methodname] + result = fallback_func( + self, conn, ident, obj, *args, **kwds + ) + msg = ('#RETURN', result) + except Exception: + msg = ('#TRACEBACK', format_exc()) + + except EOFError: + util.debug('got EOF -- exiting thread serving %r', + threading.currentThread().name) + sys.exit(0) + + except Exception: + msg = ('#TRACEBACK', format_exc()) + + try: + try: + send(msg) + except Exception: + send(('#UNSERIALIZABLE', repr(msg))) + except Exception as exc: + info('exception in thread serving %r', + threading.currentThread().name) + info(' ... message was %r', msg) + info(' ... 
exception was %r', exc) + conn.close() + sys.exit(1) + + def fallback_getvalue(self, conn, ident, obj): + return obj + + def fallback_str(self, conn, ident, obj): + return str(obj) + + def fallback_repr(self, conn, ident, obj): + return repr(obj) + + fallback_mapping = { + '__str__': fallback_str, + '__repr__': fallback_repr, + '#GETVALUE': fallback_getvalue, + } + + def dummy(self, c): + pass + + def debug_info(self, c): + ''' + Return some info --- useful to spot problems with refcounting + ''' + with self.mutex: + result = [] + keys = list(self.id_to_obj.keys()) + keys.sort() + for ident in keys: + if ident != '0': + result.append(' %s: refcount=%s\n %s' % + (ident, self.id_to_refcount[ident], + str(self.id_to_obj[ident][0])[:75])) + return '\n'.join(result) + + def number_of_objects(self, c): + ''' + Number of shared objects + ''' + return len(self.id_to_obj) - 1 # don't count ident='0' + + def shutdown(self, c): + ''' + Shutdown this process + ''' + try: + try: + util.debug('manager received shutdown message') + c.send(('#RETURN', None)) + + if sys.stdout != sys.__stdout__: + util.debug('resetting stdout, stderr') + sys.stdout = sys.__stdout__ + sys.stderr = sys.__stderr__ + + util._run_finalizers(0) + + for p in active_children(): + util.debug('terminating a child process of manager') + p.terminate() + + for p in active_children(): + util.debug('terminating a child process of manager') + p.join() + + util._run_finalizers() + info('manager exiting with exitcode 0') + except: + if not error("Error while manager shutdown", exc_info=True): + import traceback + traceback.print_exc() + finally: + exit(0) + + def create(self, c, typeid, *args, **kwds): + ''' + Create a new shared object and return its id + ''' + with self.mutex: + callable, exposed, method_to_typeid, proxytype = \ + self.registry[typeid] + + if callable is None: + assert len(args) == 1 and not kwds + obj = args[0] + else: + obj = callable(*args, **kwds) + + if exposed is None: + exposed = public_methods(obj) + if method_to_typeid is not None: + assert type(method_to_typeid) is dict + exposed = list(exposed) + list(method_to_typeid) + # convert to string because xmlrpclib + # only has 32 bit signed integers + ident = '%x' % id(obj) + util.debug('%r callable returned object with id %r', typeid, ident) + + self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) + if ident not in self.id_to_refcount: + self.id_to_refcount[ident] = 0 + # increment the reference count immediately, to avoid + # this object being garbage collected before a Proxy + # object for it can be created. The caller of create() + # is responsible for doing a decref once the Proxy object + # has been created. 
+ self.incref(c, ident) + return ident, tuple(exposed) + + def get_methods(self, c, token): + ''' + Return the methods of the shared object indicated by token + ''' + return tuple(self.id_to_obj[token.id][1]) + + def accept_connection(self, c, name): + ''' + Spawn a new thread to serve this connection + ''' + threading.currentThread().name = name + c.send(('#RETURN', None)) + self.serve_client(c) + + def incref(self, c, ident): + with self.mutex: + self.id_to_refcount[ident] += 1 + + def decref(self, c, ident): + with self.mutex: + assert self.id_to_refcount[ident] >= 1 + self.id_to_refcount[ident] -= 1 + if self.id_to_refcount[ident] == 0: + del self.id_to_obj[ident], self.id_to_refcount[ident] + util.debug('disposing of obj with id %r', ident) + +# +# Class to represent state of a manager +# + + +class State(object): + __slots__ = ['value'] + INITIAL = 0 + STARTED = 1 + SHUTDOWN = 2 + +# +# Mapping from serializer name to Listener and Client types +# + +listener_client = { + 'pickle': (connection.Listener, connection.Client), + 'xmlrpclib': (connection.XmlListener, connection.XmlClient), +} + +# +# Definition of BaseManager +# + + +class BaseManager(object): + ''' + Base class for managers + ''' + _registry = {} + _Server = Server + + def __init__(self, address=None, authkey=None, serializer='pickle'): + if authkey is None: + authkey = current_process().authkey + self._address = address # XXX not final address if eg ('', 0) + self._authkey = AuthenticationString(authkey) + self._state = State() + self._state.value = State.INITIAL + self._serializer = serializer + self._Listener, self._Client = listener_client[serializer] + + def __reduce__(self): + return (type(self).from_address, + (self._address, self._authkey, self._serializer)) + + def get_server(self): + ''' + Return server object with serve_forever() method and address attribute + ''' + assert self._state.value == State.INITIAL + return Server(self._registry, self._address, + self._authkey, self._serializer) + + def connect(self): + ''' + Connect manager object to the server process + ''' + Listener, Client = listener_client[self._serializer] + conn = Client(self._address, authkey=self._authkey) + dispatch(conn, None, 'dummy') + self._state.value = State.STARTED + + def start(self, initializer=None, initargs=()): + ''' + Spawn a server process for this manager object + ''' + assert self._state.value == State.INITIAL + + if initializer is not None and not callable(initializer): + raise TypeError('initializer must be a callable') + + # pipe over which we will retrieve address of server + reader, writer = connection.Pipe(duplex=False) + + # spawn process which runs a server + self._process = Process( + target=type(self)._run_server, + args=(self._registry, self._address, self._authkey, + self._serializer, writer, initializer, initargs), + ) + ident = ':'.join(str(i) for i in self._process._identity) + self._process.name = type(self).__name__ + '-' + ident + self._process.start() + + # get address of server + writer.close() + self._address = reader.recv() + reader.close() + + # register a finalizer + self._state.value = State.STARTED + self.shutdown = Finalize( + self, type(self)._finalize_manager, + args=(self._process, self._address, self._authkey, + self._state, self._Client), + exitpriority=0 + ) + + @classmethod + def _run_server(cls, registry, address, authkey, serializer, writer, + initializer=None, initargs=()): + ''' + Create a server, report its address and run it + ''' + if initializer is not None: + initializer(*initargs) + 
+ # create server + server = cls._Server(registry, address, authkey, serializer) + + # inform parent process of the server's address + writer.send(server.address) + writer.close() + + # run the manager + info('manager serving at %r', server.address) + server.serve_forever() + + def _create(self, typeid, *args, **kwds): + ''' + Create a new shared object; return the token and exposed tuple + ''' + assert self._state.value == State.STARTED, 'server not yet started' + conn = self._Client(self._address, authkey=self._authkey) + try: + id, exposed = dispatch(conn, None, 'create', + (typeid,) + args, kwds) + finally: + conn.close() + return Token(typeid, self._address, id), exposed + + def join(self, timeout=None): + ''' + Join the manager process (if it has been spawned) + ''' + self._process.join(timeout) + + def _debug_info(self): + ''' + Return some info about the servers shared objects and connections + ''' + conn = self._Client(self._address, authkey=self._authkey) + try: + return dispatch(conn, None, 'debug_info') + finally: + conn.close() + + def _number_of_objects(self): + ''' + Return the number of shared objects + ''' + conn = self._Client(self._address, authkey=self._authkey) + try: + return dispatch(conn, None, 'number_of_objects') + finally: + conn.close() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.shutdown() + + @staticmethod + def _finalize_manager(process, address, authkey, state, _Client): + ''' + Shutdown the manager process; will be registered as a finalizer + ''' + if process.is_alive(): + info('sending shutdown message to manager') + try: + conn = _Client(address, authkey=authkey) + try: + dispatch(conn, None, 'shutdown') + finally: + conn.close() + except Exception: + pass + + process.join(timeout=0.2) + if process.is_alive(): + info('manager still alive') + if hasattr(process, 'terminate'): + info('trying to `terminate()` manager process') + process.terminate() + process.join(timeout=0.1) + if process.is_alive(): + info('manager still alive after terminate') + + state.value = State.SHUTDOWN + try: + del BaseProxy._address_to_local[address] + except KeyError: + pass + + address = property(lambda self: self._address) + + @classmethod + def register(cls, typeid, callable=None, proxytype=None, exposed=None, + method_to_typeid=None, create_method=True): + ''' + Register a typeid with the manager type + ''' + if '_registry' not in cls.__dict__: + cls._registry = cls._registry.copy() + + if proxytype is None: + proxytype = AutoProxy + + exposed = exposed or getattr(proxytype, '_exposed_', None) + + method_to_typeid = ( + method_to_typeid or + getattr(proxytype, '_method_to_typeid_', None) + ) + + if method_to_typeid: + for key, value in items(method_to_typeid): + assert type(key) is str, '%r is not a string' % key + assert type(value) is str, '%r is not a string' % value + + cls._registry[typeid] = ( + callable, exposed, method_to_typeid, proxytype + ) + + if create_method: + def temp(self, *args, **kwds): + util.debug('requesting creation of a shared %r object', typeid) + token, exp = self._create(typeid, *args, **kwds) + proxy = proxytype( + token, self._serializer, manager=self, + authkey=self._authkey, exposed=exp + ) + conn = self._Client(token.address, authkey=self._authkey) + dispatch(conn, None, 'decref', (token.id,)) + return proxy + temp.__name__ = typeid + setattr(cls, typeid, temp) + +# +# Subclass of set which get cleared after a fork +# + + +class ProcessLocalSet(set): + + def __init__(self): + 
util.register_after_fork(self, lambda obj: obj.clear()) + + def __reduce__(self): + return type(self), () + +# +# Definition of BaseProxy +# + + +class BaseProxy(object): + ''' + A base for proxies of shared objects + ''' + _address_to_local = {} + _mutex = util.ForkAwareThreadLock() + + def __init__(self, token, serializer, manager=None, + authkey=None, exposed=None, incref=True): + BaseProxy._mutex.acquire() + try: + tls_idset = BaseProxy._address_to_local.get(token.address, None) + if tls_idset is None: + tls_idset = util.ForkAwareLocal(), ProcessLocalSet() + BaseProxy._address_to_local[token.address] = tls_idset + finally: + BaseProxy._mutex.release() + + # self._tls is used to record the connection used by this + # thread to communicate with the manager at token.address + self._tls = tls_idset[0] + + # self._idset is used to record the identities of all shared + # objects for which the current process owns references and + # which are in the manager at token.address + self._idset = tls_idset[1] + + self._token = token + self._id = self._token.id + self._manager = manager + self._serializer = serializer + self._Client = listener_client[serializer][1] + + if authkey is not None: + self._authkey = AuthenticationString(authkey) + elif self._manager is not None: + self._authkey = self._manager._authkey + else: + self._authkey = current_process().authkey + + if incref: + self._incref() + + util.register_after_fork(self, BaseProxy._after_fork) + + def _connect(self): + util.debug('making connection to manager') + name = current_process().name + if threading.currentThread().name != 'MainThread': + name += '|' + threading.currentThread().name + conn = self._Client(self._token.address, authkey=self._authkey) + dispatch(conn, None, 'accept_connection', (name,)) + self._tls.connection = conn + + def _callmethod(self, methodname, args=(), kwds={}): + ''' + Try to call a method of the referrent and return a copy of the result + ''' + try: + conn = self._tls.connection + except AttributeError: + util.debug('thread %r does not own a connection', + threading.currentThread().name) + self._connect() + conn = self._tls.connection + + conn.send((self._id, methodname, args, kwds)) + kind, result = conn.recv() + + if kind == '#RETURN': + return result + elif kind == '#PROXY': + exposed, token = result + proxytype = self._manager._registry[token.typeid][-1] + proxy = proxytype( + token, self._serializer, manager=self._manager, + authkey=self._authkey, exposed=exposed + ) + conn = self._Client(token.address, authkey=self._authkey) + dispatch(conn, None, 'decref', (token.id,)) + return proxy + raise convert_to_error(kind, result) + + def _getvalue(self): + ''' + Get a copy of the value of the referent + ''' + return self._callmethod('#GETVALUE') + + def _incref(self): + conn = self._Client(self._token.address, authkey=self._authkey) + dispatch(conn, None, 'incref', (self._id,)) + util.debug('INCREF %r', self._token.id) + + self._idset.add(self._id) + + state = self._manager and self._manager._state + + self._close = Finalize( + self, BaseProxy._decref, + args=(self._token, self._authkey, state, + self._tls, self._idset, self._Client), + exitpriority=10 + ) + + @staticmethod + def _decref(token, authkey, state, tls, idset, _Client): + idset.discard(token.id) + + # check whether manager is still alive + if state is None or state.value == State.STARTED: + # tell manager this process no longer cares about referent + try: + util.debug('DECREF %r', token.id) + conn = _Client(token.address, authkey=authkey) + 
dispatch(conn, None, 'decref', (token.id,)) + except Exception as exc: + util.debug('... decref failed %s', exc) + + else: + util.debug('DECREF %r -- manager already shutdown', token.id) + + # check whether we can close this thread's connection because + # the process owns no more references to objects for this manager + if not idset and hasattr(tls, 'connection'): + util.debug('thread %r has no more proxies so closing conn', + threading.currentThread().name) + tls.connection.close() + del tls.connection + + def _after_fork(self): + self._manager = None + try: + self._incref() + except Exception as exc: + # the proxy may just be for a manager which has shutdown + info('incref failed: %s', exc) + + def __reduce__(self): + kwds = {} + if Popen.thread_is_spawning(): + kwds['authkey'] = self._authkey + + if getattr(self, '_isauto', False): + kwds['exposed'] = self._exposed_ + return (RebuildProxy, + (AutoProxy, self._token, self._serializer, kwds)) + else: + return (RebuildProxy, + (type(self), self._token, self._serializer, kwds)) + + def __deepcopy__(self, memo): + return self._getvalue() + + def __repr__(self): + return '<%s object, typeid %r at %s>' % \ + (type(self).__name__, self._token.typeid, '0x%x' % id(self)) + + def __str__(self): + ''' + Return representation of the referent (or a fall-back if that fails) + ''' + try: + return self._callmethod('__repr__') + except Exception: + return repr(self)[:-1] + "; '__str__()' failed>" + +# +# Function used for unpickling +# + + +def RebuildProxy(func, token, serializer, kwds): + ''' + Function used for unpickling proxy objects. + + If possible the shared object is returned, or otherwise a proxy for it. + ''' + server = getattr(current_process(), '_manager_server', None) + + if server and server.address == token.address: + return server.id_to_obj[token.id][0] + else: + incref = ( + kwds.pop('incref', True) and + not getattr(current_process(), '_inheriting', False) + ) + return func(token, serializer, incref=incref, **kwds) + +# +# Functions to create proxies and proxy types +# + + +def MakeProxyType(name, exposed, _cache={}): + ''' + Return an proxy type whose methods are given by `exposed` + ''' + exposed = tuple(exposed) + try: + return _cache[(name, exposed)] + except KeyError: + pass + + dic = {} + + for meth in exposed: + exec('''def %s(self, *args, **kwds): + return self._callmethod(%r, args, kwds)''' % (meth, meth), dic) + + ProxyType = type(name, (BaseProxy,), dic) + ProxyType._exposed_ = exposed + _cache[(name, exposed)] = ProxyType + return ProxyType + + +def AutoProxy(token, serializer, manager=None, authkey=None, + exposed=None, incref=True): + ''' + Return an auto-proxy for `token` + ''' + _Client = listener_client[serializer][1] + + if exposed is None: + conn = _Client(token.address, authkey=authkey) + try: + exposed = dispatch(conn, None, 'get_methods', (token,)) + finally: + conn.close() + + if authkey is None and manager is not None: + authkey = manager._authkey + if authkey is None: + authkey = current_process().authkey + + ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) + proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, + incref=incref) + proxy._isauto = True + return proxy + +# +# Types/callables which we will register with SyncManager +# + + +class Namespace(object): + + def __init__(self, **kwds): + self.__dict__.update(kwds) + + def __repr__(self): + items = list(self.__dict__.items()) + temp = [] + for name, value in items: + if not name.startswith('_'): + temp.append('%s=%r' % 
(name, value)) + temp.sort() + return 'Namespace(%s)' % str.join(', ', temp) + + +class Value(object): + + def __init__(self, typecode, value, lock=True): + self._typecode = typecode + self._value = value + + def get(self): + return self._value + + def set(self, value): + self._value = value + + def __repr__(self): + return '%s(%r, %r)' % (type(self).__name__, + self._typecode, self._value) + value = property(get, set) + + +def Array(typecode, sequence, lock=True): + return array.array(typecode, sequence) + +# +# Proxy types used by SyncManager +# + + +class IteratorProxy(BaseProxy): + if sys.version_info[0] == 3: + _exposed = ('__next__', 'send', 'throw', 'close') + else: + _exposed_ = ('__next__', 'next', 'send', 'throw', 'close') + + def next(self, *args): + return self._callmethod('next', args) + + def __iter__(self): + return self + + def __next__(self, *args): + return self._callmethod('__next__', args) + + def send(self, *args): + return self._callmethod('send', args) + + def throw(self, *args): + return self._callmethod('throw', args) + + def close(self, *args): + return self._callmethod('close', args) + + +class AcquirerProxy(BaseProxy): + _exposed_ = ('acquire', 'release') + + def acquire(self, blocking=True): + return self._callmethod('acquire', (blocking,)) + + def release(self): + return self._callmethod('release') + + def __enter__(self): + return self._callmethod('acquire') + + def __exit__(self, exc_type, exc_val, exc_tb): + return self._callmethod('release') + + +class ConditionProxy(AcquirerProxy): + _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all') + + def wait(self, timeout=None): + return self._callmethod('wait', (timeout,)) + + def notify(self): + return self._callmethod('notify') + + def notify_all(self): + return self._callmethod('notify_all') + + def wait_for(self, predicate, timeout=None): + result = predicate() + if result: + return result + if timeout is not None: + endtime = monotonic() + timeout + else: + endtime = None + waittime = None + while not result: + if endtime is not None: + waittime = endtime - monotonic() + if waittime <= 0: + break + self.wait(waittime) + result = predicate() + return result + + +class EventProxy(BaseProxy): + _exposed_ = ('is_set', 'set', 'clear', 'wait') + + def is_set(self): + return self._callmethod('is_set') + + def set(self): + return self._callmethod('set') + + def clear(self): + return self._callmethod('clear') + + def wait(self, timeout=None): + return self._callmethod('wait', (timeout,)) + + +class NamespaceProxy(BaseProxy): + _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') + + def __getattr__(self, key): + if key[0] == '_': + return object.__getattribute__(self, key) + callmethod = object.__getattribute__(self, '_callmethod') + return callmethod('__getattribute__', (key,)) + + def __setattr__(self, key, value): + if key[0] == '_': + return object.__setattr__(self, key, value) + callmethod = object.__getattribute__(self, '_callmethod') + return callmethod('__setattr__', (key, value)) + + def __delattr__(self, key): + if key[0] == '_': + return object.__delattr__(self, key) + callmethod = object.__getattribute__(self, '_callmethod') + return callmethod('__delattr__', (key,)) + + +class ValueProxy(BaseProxy): + _exposed_ = ('get', 'set') + + def get(self): + return self._callmethod('get') + + def set(self, value): + return self._callmethod('set', (value,)) + value = property(get, set) + + +BaseListProxy = MakeProxyType('BaseListProxy', ( + '__add__', '__contains__', '__delitem__', 
'__delslice__', + '__getitem__', '__getslice__', '__len__', '__mul__', + '__reversed__', '__rmul__', '__setitem__', '__setslice__', + 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', + 'reverse', 'sort', '__imul__', +)) # XXX __getslice__ and __setslice__ unneeded in Py3.0 + + +class ListProxy(BaseListProxy): + + def __iadd__(self, value): + self._callmethod('extend', (value,)) + return self + + def __imul__(self, value): + self._callmethod('__imul__', (value,)) + return self + + +DictProxy = MakeProxyType('DictProxy', ( + '__contains__', '__delitem__', '__getitem__', '__len__', + '__setitem__', 'clear', 'copy', 'get', 'has_key', 'items', + 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values', +)) + + +ArrayProxy = MakeProxyType('ArrayProxy', ( + '__len__', '__getitem__', '__setitem__', '__getslice__', '__setslice__', +)) # XXX __getslice__ and __setslice__ unneeded in Py3.0 + + +PoolProxy = MakeProxyType('PoolProxy', ( + 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', + 'map', 'map_async', 'starmap', 'starmap_async', 'terminate', +)) +PoolProxy._method_to_typeid_ = { + 'apply_async': 'AsyncResult', + 'map_async': 'AsyncResult', + 'starmap_async': 'AsyncResult', + 'imap': 'Iterator', + 'imap_unordered': 'Iterator', +} + +# +# Definition of SyncManager +# + + +class SyncManager(BaseManager): + ''' + Subclass of `BaseManager` which supports a number of shared object types. + + The types registered are those intended for the synchronization + of threads, plus `dict`, `list` and `Namespace`. + + The `billiard.Manager()` function creates started instances of + this class. + ''' + +SyncManager.register('Queue', Queue) +SyncManager.register('JoinableQueue', Queue) +SyncManager.register('Event', threading.Event, EventProxy) +SyncManager.register('Lock', threading.Lock, AcquirerProxy) +SyncManager.register('RLock', threading.RLock, AcquirerProxy) +SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) +SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, + AcquirerProxy) +SyncManager.register('Condition', threading.Condition, ConditionProxy) +SyncManager.register('Pool', Pool, PoolProxy) +SyncManager.register('list', list, ListProxy) +SyncManager.register('dict', dict, DictProxy) +SyncManager.register('Value', Value, ValueProxy) +SyncManager.register('Array', Array, ArrayProxy) +SyncManager.register('Namespace', Namespace, NamespaceProxy) + +# types returned by methods of PoolProxy +SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) +SyncManager.register('AsyncResult', create_method=False) diff --git a/thesisenv/lib/python3.6/site-packages/billiard/pool.py b/thesisenv/lib/python3.6/site-packages/billiard/pool.py new file mode 100644 index 0000000..f2e2d11 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/pool.py @@ -0,0 +1,1959 @@ +# -*- coding: utf-8 -*- +# +# Module providing the `Pool` class for managing a process pool +# +# multiprocessing/pool.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# +from __future__ import absolute_import + +# +# Imports +# + +import errno +import itertools +import os +import platform +import signal +import sys +import threading +import time +import warnings + +from collections import deque +from functools import partial + +from . import Event, Process, cpu_count +from . 
import util +from .common import pickle_loads, reset_signals, restart_state +from .compat import get_errno, send_offset +from .einfo import ExceptionInfo +from .dummy import DummyProcess +from .exceptions import ( + CoroStop, + RestartFreqExceeded, + SoftTimeLimitExceeded, + Terminated, + TimeLimitExceeded, + TimeoutError, + WorkerLostError, +) +from .five import Empty, Queue, range, values, reraise, monotonic +from .util import Finalize, debug + +PY3 = sys.version_info[0] == 3 + +if platform.system() == 'Windows': # pragma: no cover + # On Windows os.kill calls TerminateProcess which cannot be + # handled by # any process, so this is needed to terminate the task + # *and its children* (if any). + from ._win import kill_processtree as _kill # noqa + SIGKILL = signal.SIGTERM +else: + from os import kill as _kill # noqa + SIGKILL = signal.SIGKILL + + +try: + TIMEOUT_MAX = threading.TIMEOUT_MAX +except AttributeError: # pragma: no cover + TIMEOUT_MAX = 1e10 # noqa + + +if sys.version_info >= (3, 3): + _Semaphore = threading.Semaphore +else: + # Semaphore is a factory function pointing to _Semaphore + _Semaphore = threading._Semaphore # noqa + +SIGMAP = dict( + (getattr(signal, n), n) for n in dir(signal) if n.startswith('SIG') +) + +# +# Constants representing the state of a pool +# + +RUN = 0 +CLOSE = 1 +TERMINATE = 2 + +# +# Constants representing the state of a job +# + +ACK = 0 +READY = 1 +TASK = 2 +NACK = 3 +DEATH = 4 + +# +# Exit code constants +# +EX_OK = 0 +EX_FAILURE = 1 +EX_RECYCLE = 0x9B + + +# Signal used for soft time limits. +SIG_SOFT_TIMEOUT = getattr(signal, "SIGUSR1", None) + +# +# Miscellaneous +# + +LOST_WORKER_TIMEOUT = 10.0 +EX_OK = getattr(os, "EX_OK", 0) + +job_counter = itertools.count() + +Lock = threading.Lock + + +def _get_send_offset(connection): + try: + native = connection.send_offset + except AttributeError: + native = None + if native is None: + return partial(send_offset, connection.fileno()) + return native + + +def human_status(status): + if (status or 0) < 0: + try: + return 'signal {0} ({1})'.format(-status, SIGMAP[-status]) + except KeyError: + return 'signal {0}'.format(-status) + return 'exitcode {0}'.format(status) + + +def mapstar(args): + return list(map(*args)) + + +def starmapstar(args): + return list(itertools.starmap(args[0], args[1])) + + +def error(msg, *args, **kwargs): + if util._logger: + util._logger.error(msg, *args, **kwargs) + + +def stop_if_not_current(thread, timeout=None): + if thread is not threading.currentThread(): + thread.stop(timeout) + + +class LaxBoundedSemaphore(_Semaphore): + """Semaphore that checks that # release is <= # acquires, + but ignores if # releases >= value.""" + + def __init__(self, value=1, verbose=None): + if PY3: + _Semaphore.__init__(self, value) + else: + _Semaphore.__init__(self, value, verbose) + self._initial_value = value + + def shrink(self): + self._initial_value -= 1 + self.acquire() + + if PY3: + + def release(self): + cond = self._cond + with cond: + if self._value < self._initial_value: + self._value += 1 + cond.notify_all() + + def clear(self): + while self._value < self._initial_value: + _Semaphore.release(self) + + def grow(self): + with self._cond: + self._initial_value += 1 + self._value += 1 + self._cond.notify() + + else: + + def grow(self): + cond = self._Semaphore__cond + with cond: + self._initial_value += 1 + self._Semaphore__value += 1 + cond.notify() + + def release(self): # noqa + cond = self._Semaphore__cond + with cond: + if self._Semaphore__value < self._initial_value: + 
self._Semaphore__value += 1
+ cond.notifyAll()
+
+ def clear(self): # noqa
+ while self._Semaphore__value < self._initial_value:
+ _Semaphore.release(self)
+
+#
+# Exceptions
+#
+
+
+class MaybeEncodingError(Exception):
+ """Wraps possible unpickleable errors, so they can be
+ safely sent through the socket."""
+
+ def __init__(self, exc, value):
+ self.exc = repr(exc)
+ self.value = repr(value)
+ super(MaybeEncodingError, self).__init__(self.exc, self.value)
+
+ def __repr__(self):
+ return "<MaybeEncodingError: %s>" % str(self)
+
+ def __str__(self):
+ return "Error sending result: '%r'. Reason: '%r'." % (
+ self.value, self.exc)
+
+
+class WorkersJoined(Exception):
+ """All workers have terminated."""
+
+
+def soft_timeout_sighandler(signum, frame):
+ raise SoftTimeLimitExceeded()
+
+#
+# Code run by worker processes
+#
+
+
+class Worker(Process):
+ _controlled_termination = False
+ _job_terminated = False
+
+ def __init__(self, inq, outq, synq=None, initializer=None, initargs=(),
+ maxtasks=None, sentinel=None, on_exit=None,
+ sigprotection=True):
+ assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
+ self.initializer = initializer
+ self.initargs = initargs
+ self.maxtasks = maxtasks
+ self._shutdown = sentinel
+ self.on_exit = on_exit
+ self.sigprotection = sigprotection
+ self.inq, self.outq, self.synq = inq, outq, synq
+ self._make_shortcuts()
+
+ super(Worker, self).__init__()
+
+ def __reduce__(self):
+ return self.__class__, (
+ self.inq, self.outq, self.synq, self.initializer,
+ self.initargs, self.maxtasks, self._shutdown,
+ )
+
+ def _make_shortcuts(self):
+ self.inqW_fd = self.inq._writer.fileno() # inqueue write fd
+ self.outqR_fd = self.outq._reader.fileno() # outqueue read fd
+ if self.synq:
+ self.synqR_fd = self.synq._reader.fileno() # synqueue read fd
+ self.synqW_fd = self.synq._writer.fileno() # synqueue write fd
+ self.send_syn_offset = _get_send_offset(self.synq._writer)
+ else:
+ self.synqR_fd = self.synqW_fd = self._send_syn_offset = None
+ self._quick_put = self.inq._writer.send
+ self._quick_get = self.outq._reader.recv
+ self.send_job_offset = _get_send_offset(self.inq._writer)
+
+ def run(self):
+ _exit = sys.exit
+ _exitcode = [None]
+
+ def exit(status=None):
+ _exitcode[0] = status
+ return _exit()
+ sys.exit = exit
+
+ pid = os.getpid()
+
+ self._make_child_methods()
+ self.after_fork()
+ self.on_loop_start(pid=pid) # callback on loop start
+ try:
+ sys.exit(self.workloop(pid=pid))
+ except Exception as exc:
+ error('Pool process %r error: %r', self, exc, exc_info=1)
+ self._do_exit(pid, _exitcode[0], exc)
+ finally:
+ self._do_exit(pid, _exitcode[0], None)
+
+ def _do_exit(self, pid, exitcode, exc=None):
+ if exitcode is None:
+ exitcode = EX_FAILURE if exc else EX_OK
+
+ if self.on_exit is not None:
+ self.on_exit(pid, exitcode)
+
+ if sys.platform != 'win32':
+ try:
+ self.outq.put((DEATH, (pid, exitcode)))
+ time.sleep(1)
+ finally:
+ os._exit(exitcode)
+ else:
+ os._exit(exitcode)
+
+ def on_loop_start(self, pid):
+ pass
+
+ def terminate_controlled(self):
+ self._controlled_termination = True
+ self.terminate()
+
+ def prepare_result(self, result):
+ return result
+
+ def workloop(self, debug=debug, now=monotonic, pid=None):
+ pid = pid or os.getpid()
+ put = self.outq.put
+ inqW_fd = self.inqW_fd
+ synqW_fd = self.synqW_fd
+ maxtasks = self.maxtasks
+ prepare_result = self.prepare_result
+
+ wait_for_job = self.wait_for_job
+ _wait_for_syn = self.wait_for_syn
+
+ def wait_for_syn(jid):
+ i = 0
+ while 1:
+ if i > 60:
+ error('!!!WAIT FOR ACK TIMEOUT: 
job:%r fd:%r!!!', + jid, self.synq._reader.fileno(), exc_info=1) + req = _wait_for_syn() + if req: + type_, args = req + if type_ == NACK: + return False + assert type_ == ACK + return True + i += 1 + + completed = 0 + while maxtasks is None or (maxtasks and completed < maxtasks): + req = wait_for_job() + if req: + type_, args_ = req + assert type_ == TASK + job, i, fun, args, kwargs = args_ + put((ACK, (job, i, now(), pid, synqW_fd))) + if _wait_for_syn: + confirm = wait_for_syn(job) + if not confirm: + continue # received NACK + try: + result = (True, prepare_result(fun(*args, **kwargs))) + except Exception: + result = (False, ExceptionInfo()) + try: + put((READY, (job, i, result, inqW_fd))) + except Exception as exc: + _, _, tb = sys.exc_info() + try: + wrapped = MaybeEncodingError(exc, result[1]) + einfo = ExceptionInfo(( + MaybeEncodingError, wrapped, tb, + )) + put((READY, (job, i, (False, einfo), inqW_fd))) + finally: + del(tb) + completed += 1 + debug('worker exiting after %d tasks', completed) + if maxtasks: + return EX_RECYCLE if completed == maxtasks else EX_FAILURE + return EX_OK + + def after_fork(self): + if hasattr(self.inq, '_writer'): + self.inq._writer.close() + if hasattr(self.outq, '_reader'): + self.outq._reader.close() + + if self.initializer is not None: + self.initializer(*self.initargs) + + # Make sure all exiting signals call finally: blocks. + # This is important for the semaphore to be released. + reset_signals(full=self.sigprotection) + + # install signal handler for soft timeouts. + if SIG_SOFT_TIMEOUT is not None: + signal.signal(SIG_SOFT_TIMEOUT, soft_timeout_sighandler) + + try: + signal.signal(signal.SIGINT, signal.SIG_IGN) + except AttributeError: + pass + + def _make_recv_method(self, conn): + get = conn.get + + if hasattr(conn, '_reader'): + _poll = conn._reader.poll + if hasattr(conn, 'get_payload') and conn.get_payload: + get_payload = conn.get_payload + + def _recv(timeout, loads=pickle_loads): + return True, loads(get_payload()) + else: + def _recv(timeout): # noqa + if _poll(timeout): + return True, get() + return False, None + else: + def _recv(timeout): # noqa + try: + return True, get(timeout=timeout) + except Queue.Empty: + return False, None + return _recv + + def _make_child_methods(self, loads=pickle_loads): + self.wait_for_job = self._make_protected_receive(self.inq) + self.wait_for_syn = (self._make_protected_receive(self.synq) + if self.synq else None) + + def _make_protected_receive(self, conn): + _receive = self._make_recv_method(conn) + should_shutdown = self._shutdown.is_set if self._shutdown else None + + def receive(debug=debug): + if should_shutdown and should_shutdown(): + debug('worker got sentinel -- exiting') + raise SystemExit(EX_OK) + try: + ready, req = _receive(1.0) + if not ready: + return None + except (EOFError, IOError) as exc: + if get_errno(exc) == errno.EINTR: + return None # interrupted, maybe by gdb + debug('worker got %s -- exiting', type(exc).__name__) + raise SystemExit(EX_FAILURE) + if req is None: + debug('worker got sentinel -- exiting') + raise SystemExit(EX_FAILURE) + return req + + return receive + + +# +# Class representing a process pool +# + + +class PoolThread(DummyProcess): + + def __init__(self, *args, **kwargs): + DummyProcess.__init__(self) + self._state = RUN + self._was_started = False + self.daemon = True + + def run(self): + try: + return self.body() + except RestartFreqExceeded as exc: + error("Thread %r crashed: %r", type(self).__name__, exc, + exc_info=1) + _kill(os.getpid(), signal.SIGTERM) 
+ sys.exit() + except Exception as exc: + error("Thread %r crashed: %r", type(self).__name__, exc, + exc_info=1) + os._exit(1) + + def start(self, *args, **kwargs): + self._was_started = True + super(PoolThread, self).start(*args, **kwargs) + + def on_stop_not_started(self): + pass + + def stop(self, timeout=None): + if self._was_started: + self.join(timeout) + return + self.on_stop_not_started() + + def terminate(self): + self._state = TERMINATE + + def close(self): + self._state = CLOSE + + +class Supervisor(PoolThread): + + def __init__(self, pool): + self.pool = pool + super(Supervisor, self).__init__() + + def body(self): + debug('worker handler starting') + + time.sleep(0.8) + + pool = self.pool + + try: + # do a burst at startup to verify that we can start + # our pool processes, and in that time we lower + # the max restart frequency. + prev_state = pool.restart_state + pool.restart_state = restart_state(10 * pool._processes, 1) + for _ in range(10): + if self._state == RUN and pool._state == RUN: + pool._maintain_pool() + time.sleep(0.1) + + # Keep maintaing workers until the cache gets drained, unless + # the pool is termianted + pool.restart_state = prev_state + while self._state == RUN and pool._state == RUN: + pool._maintain_pool() + time.sleep(0.8) + except RestartFreqExceeded: + pool.close() + pool.join() + raise + debug('worker handler exiting') + + +class TaskHandler(PoolThread): + + def __init__(self, taskqueue, put, outqueue, pool): + self.taskqueue = taskqueue + self.put = put + self.outqueue = outqueue + self.pool = pool + super(TaskHandler, self).__init__() + + def body(self): + taskqueue = self.taskqueue + put = self.put + + for taskseq, set_length in iter(taskqueue.get, None): + try: + i = -1 + for i, task in enumerate(taskseq): + if self._state: + debug('task handler found thread._state != RUN') + break + try: + put(task) + except IOError: + debug('could not put task on queue') + break + else: + if set_length: + debug('doing set_length()') + set_length(i + 1) + continue + break + except Exception as exc: + error('Task Handler ERROR: %r', exc, exc_info=1) + break + else: + debug('task handler got sentinel') + + self.tell_others() + + def tell_others(self): + outqueue = self.outqueue + put = self.put + pool = self.pool + + try: + # tell result handler to finish when cache is empty + debug('task handler sending sentinel to result handler') + outqueue.put(None) + + # tell workers there is no more work + debug('task handler sending sentinel to workers') + for p in pool: + put(None) + except IOError: + debug('task handler got IOError when sending sentinels') + + debug('task handler exiting') + + def on_stop_not_started(self): + self.tell_others() + + +class TimeoutHandler(PoolThread): + + def __init__(self, processes, cache, t_soft, t_hard): + self.processes = processes + self.cache = cache + self.t_soft = t_soft + self.t_hard = t_hard + self._it = None + super(TimeoutHandler, self).__init__() + + def _process_by_pid(self, pid): + return next(( + (proc, i) for i, proc in enumerate(self.processes) + if proc.pid == pid + ), (None, None)) + + def on_soft_timeout(self, job): + debug('soft time limit exceeded for %r', job) + process, _index = self._process_by_pid(job._worker_pid) + if not process: + return + + # Run timeout callback + job.handle_timeout(soft=True) + + try: + _kill(job._worker_pid, SIG_SOFT_TIMEOUT) + except OSError as exc: + if get_errno(exc) != errno.ESRCH: + raise + + def on_hard_timeout(self, job): + if job.ready(): + return + debug('hard time limit 
exceeded for %r', job) + # Remove from cache and set return value to an exception + try: + raise TimeLimitExceeded(job._timeout) + except TimeLimitExceeded: + job._set(job._job, (False, ExceptionInfo())) + else: # pragma: no cover + pass + + # Remove from _pool + process, _index = self._process_by_pid(job._worker_pid) + + # Run timeout callback + job.handle_timeout(soft=False) + + if process: + self._trywaitkill(process) + + def _trywaitkill(self, worker): + debug('timeout: sending TERM to %s', worker._name) + try: + worker.terminate() + except OSError: + pass + else: + if worker._popen.wait(timeout=0.1): + return + debug('timeout: TERM timed-out, now sending KILL to %s', worker._name) + try: + _kill(worker.pid, SIGKILL) + except OSError: + pass + + def handle_timeouts(self): + cache = self.cache + t_hard, t_soft = self.t_hard, self.t_soft + dirty = set() + on_soft_timeout = self.on_soft_timeout + on_hard_timeout = self.on_hard_timeout + + def _timed_out(start, timeout): + if not start or not timeout: + return False + if monotonic() >= start + timeout: + return True + + # Inner-loop + while self._state == RUN: + + # Remove dirty items not in cache anymore + if dirty: + dirty = set(k for k in dirty if k in cache) + + for i, job in list(cache.items()): + ack_time = job._time_accepted + soft_timeout = job._soft_timeout + if soft_timeout is None: + soft_timeout = t_soft + hard_timeout = job._timeout + if hard_timeout is None: + hard_timeout = t_hard + if _timed_out(ack_time, hard_timeout): + on_hard_timeout(job) + elif i not in dirty and _timed_out(ack_time, soft_timeout): + on_soft_timeout(job) + dirty.add(i) + yield + + def body(self): + while self._state == RUN: + try: + for _ in self.handle_timeouts(): + time.sleep(1.0) # don't spin + except CoroStop: + break + debug('timeout handler exiting') + + def handle_event(self, *args): + if self._it is None: + self._it = self.handle_timeouts() + try: + next(self._it) + except StopIteration: + self._it = None + + +class ResultHandler(PoolThread): + + def __init__(self, outqueue, get, cache, poll, + join_exited_workers, putlock, restart_state, + check_timeouts, on_job_ready): + self.outqueue = outqueue + self.get = get + self.cache = cache + self.poll = poll + self.join_exited_workers = join_exited_workers + self.putlock = putlock + self.restart_state = restart_state + self._it = None + self._shutdown_complete = False + self.check_timeouts = check_timeouts + self.on_job_ready = on_job_ready + self._make_methods() + super(ResultHandler, self).__init__() + + def on_stop_not_started(self): + # used when pool started without result handler thread. + self.finish_at_shutdown(handle_timeouts=True) + + def _make_methods(self): + cache = self.cache + putlock = self.putlock + restart_state = self.restart_state + on_job_ready = self.on_job_ready + + def on_ack(job, i, time_accepted, pid, synqW_fd): + restart_state.R = 0 + try: + cache[job]._ack(i, time_accepted, pid, synqW_fd) + except (KeyError, AttributeError): + # Object gone or doesn't support _ack (e.g. IMAPIterator). 
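+        # A hedged sketch of the (state, args) messages these handlers consume
+        # (names mirror the Worker loop earlier in this file; job_id, value,
+        # worker_pid etc. are placeholders, not variables in this scope):
+        #
+        #   >>> on_state_change((ACK, (job_id, i, monotonic(), worker_pid, synqW_fd)))
+        #   >>> on_state_change((READY, (job_id, i, (True, value), inqW_fd)))
+        #   >>> on_state_change((DEATH, (worker_pid, exitcode)))
+        #
+        # on_state_change() is defined just below and dispatches on the first
+        # element of the tuple via state_handlers.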
+ pass + + def on_ready(job, i, obj, inqW_fd): + if on_job_ready is not None: + on_job_ready(job, i, obj, inqW_fd) + try: + item = cache[job] + except KeyError: + return + if not item.ready(): + if putlock is not None: + putlock.release() + try: + item._set(i, obj) + except KeyError: + pass + + def on_death(pid, exitcode): + try: + os.kill(pid, signal.SIGTERM) + except OSError as exc: + if get_errno(exc) != errno.ESRCH: + raise + + state_handlers = self.state_handlers = { + ACK: on_ack, READY: on_ready, DEATH: on_death + } + + def on_state_change(task): + state, args = task + try: + state_handlers[state](*args) + except KeyError: + debug("Unknown job state: %s (args=%s)", state, args) + self.on_state_change = on_state_change + + def _process_result(self, timeout=1.0): + poll = self.poll + on_state_change = self.on_state_change + + while 1: + try: + ready, task = poll(timeout) + except (IOError, EOFError) as exc: + debug('result handler got %r -- exiting', exc) + raise CoroStop() + + if self._state: + assert self._state == TERMINATE + debug('result handler found thread._state=TERMINATE') + raise CoroStop() + + if ready: + if task is None: + debug('result handler got sentinel') + raise CoroStop() + on_state_change(task) + if timeout != 0: # blocking + break + else: + break + yield + + def handle_event(self, fileno=None, events=None): + if self._state == RUN: + if self._it is None: + self._it = self._process_result(0) # non-blocking + try: + next(self._it) + except (StopIteration, CoroStop): + self._it = None + + def body(self): + debug('result handler starting') + try: + while self._state == RUN: + try: + for _ in self._process_result(1.0): # blocking + pass + except CoroStop: + break + finally: + self.finish_at_shutdown() + + def finish_at_shutdown(self, handle_timeouts=False): + self._shutdown_complete = True + get = self.get + outqueue = self.outqueue + cache = self.cache + poll = self.poll + join_exited_workers = self.join_exited_workers + check_timeouts = self.check_timeouts + on_state_change = self.on_state_change + + time_terminate = None + while cache and self._state != TERMINATE: + if check_timeouts is not None: + check_timeouts() + try: + ready, task = poll(1.0) + except (IOError, EOFError) as exc: + debug('result handler got %r -- exiting', exc) + return + + if ready: + if task is None: + debug('result handler ignoring extra sentinel') + continue + + on_state_change(task) + try: + join_exited_workers(shutdown=True) + except WorkersJoined: + now = monotonic() + if not time_terminate: + time_terminate = now + else: + if now - time_terminate > 5.0: + debug('result handler exiting: timed out') + break + debug('result handler: all workers terminated, ' + 'timeout in %ss', + abs(min(now - time_terminate - 5.0, 0))) + + if hasattr(outqueue, '_reader'): + debug('ensuring that outqueue is not full') + # If we don't make room available in outqueue then + # attempts to add the sentinel (None) to outqueue may + # block. There is guaranteed to be no more than 2 sentinels. + try: + for i in range(10): + if not outqueue._reader.poll(): + break + get() + except (IOError, EOFError): + pass + + debug('result handler exiting: len(cache)=%s, thread._state=%s', + len(cache), self._state) + + +class Pool(object): + ''' + Class which supports an async version of applying functions to arguments. 
+ ''' + Worker = Worker + Supervisor = Supervisor + TaskHandler = TaskHandler + TimeoutHandler = TimeoutHandler + ResultHandler = ResultHandler + SoftTimeLimitExceeded = SoftTimeLimitExceeded + + def __init__(self, processes=None, initializer=None, initargs=(), + maxtasksperchild=None, timeout=None, soft_timeout=None, + lost_worker_timeout=None, + max_restarts=None, max_restart_freq=1, + on_process_up=None, + on_process_down=None, + on_timeout_set=None, + on_timeout_cancel=None, + threads=True, + semaphore=None, + putlocks=False, + allow_restart=False, + synack=False, + on_process_exit=None, + **kwargs): + self.synack = synack + self._setup_queues() + self._taskqueue = Queue() + self._cache = {} + self._state = RUN + self.timeout = timeout + self.soft_timeout = soft_timeout + self._maxtasksperchild = maxtasksperchild + self._initializer = initializer + self._initargs = initargs + self._on_process_exit = on_process_exit + self.lost_worker_timeout = lost_worker_timeout or LOST_WORKER_TIMEOUT + self.on_process_up = on_process_up + self.on_process_down = on_process_down + self.on_timeout_set = on_timeout_set + self.on_timeout_cancel = on_timeout_cancel + self.threads = threads + self.readers = {} + self.allow_restart = allow_restart + + if soft_timeout and SIG_SOFT_TIMEOUT is None: + warnings.warn(UserWarning( + "Soft timeouts are not supported: " + "on this platform: It does not have the SIGUSR1 signal.", + )) + soft_timeout = None + + self._processes = self.cpu_count() if processes is None else processes + self.max_restarts = max_restarts or round(self._processes * 100) + self.restart_state = restart_state(max_restarts, max_restart_freq or 1) + + if initializer is not None and not callable(initializer): + raise TypeError('initializer must be a callable') + + if on_process_exit is not None and not callable(on_process_exit): + raise TypeError('on_process_exit must be callable') + + self._pool = [] + self._poolctrl = {} + self.putlocks = putlocks + self._putlock = semaphore or LaxBoundedSemaphore(self._processes) + for i in range(self._processes): + self._create_worker_process(i) + + self._worker_handler = self.Supervisor(self) + if threads: + self._worker_handler.start() + + self._task_handler = self.TaskHandler(self._taskqueue, + self._quick_put, + self._outqueue, + self._pool) + if threads: + self._task_handler.start() + + # Thread killing timedout jobs. + self._timeout_handler = self.TimeoutHandler( + self._pool, self._cache, + self.soft_timeout, self.timeout, + ) + self._timeout_handler_mutex = Lock() + self._timeout_handler_started = False + if self.timeout is not None or self.soft_timeout is not None: + self._start_timeout_handler() + + # If running without threads, we need to check for timeouts + # while waiting for unfinished work at shutdown. + self.check_timeouts = None + if not threads: + self.check_timeouts = self._timeout_handler.handle_event + + # Thread processing results in the outqueue. 
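+        # The helper threads wired up above are: Supervisor (replaces exited
+        # worker processes), TaskHandler (feeds tasks into the inqueue) and
+        # TimeoutHandler (enforces soft/hard time limits); the ResultHandler
+        # created just below drains the outqueue.  A minimal, illustrative use
+        # of the resulting API -- assuming billiard is importable from this
+        # virtualenv -- would be:
+        #
+        #   >>> from billiard.pool import Pool
+        #   >>> pool = Pool(processes=2)
+        #   >>> result = pool.apply_async(pow, (2, 10))
+        #   >>> result.get(timeout=5)
+        #   1024
+        #   >>> pool.close(); pool.join()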
+ self._result_handler = self.create_result_handler() + self.handle_result_event = self._result_handler.handle_event + + if threads: + self._result_handler.start() + + self._terminate = Finalize( + self, self._terminate_pool, + args=(self._taskqueue, self._inqueue, self._outqueue, + self._pool, self._worker_handler, self._task_handler, + self._result_handler, self._cache, + self._timeout_handler, + self._help_stuff_finish_args()), + exitpriority=15, + ) + + def create_result_handler(self, **extra_kwargs): + return self.ResultHandler( + self._outqueue, self._quick_get, self._cache, + self._poll_result, self._join_exited_workers, + self._putlock, self.restart_state, self.check_timeouts, + self.on_job_ready, **extra_kwargs + ) + + def on_job_ready(self, job, i, obj, inqW_fd): + pass + + def _help_stuff_finish_args(self): + return self._inqueue, self._task_handler, self._pool + + def cpu_count(self): + try: + return cpu_count() + except NotImplementedError: + return 1 + + def handle_result_event(self, *args): + return self._result_handler.handle_event(*args) + + def _process_register_queues(self, worker, queues): + pass + + def _process_by_pid(self, pid): + return next(( + (proc, i) for i, proc in enumerate(self._pool) + if proc.pid == pid + ), (None, None)) + + def get_process_queues(self): + return self._inqueue, self._outqueue, None + + def _create_worker_process(self, i): + sentinel = Event() if self.allow_restart else None + inq, outq, synq = self.get_process_queues() + w = self.Worker( + inq, outq, synq, self._initializer, self._initargs, + self._maxtasksperchild, sentinel, self._on_process_exit, + # Need to handle all signals if using the ipc semaphore, + # to make sure the semaphore is released. + sigprotection=self.threads, + ) + self._pool.append(w) + self._process_register_queues(w, (inq, outq, synq)) + w.name = w.name.replace('Process', 'PoolWorker') + w.daemon = True + w.index = i + w.start() + self._poolctrl[w.pid] = sentinel + if self.on_process_up: + self.on_process_up(w) + return w + + def process_flush_queues(self, worker): + pass + + def _join_exited_workers(self, shutdown=False): + """Cleanup after any worker processes which have exited due to + reaching their specified lifetime. Returns True if any workers were + cleaned up. + """ + now = None + # The worker may have published a result before being terminated, + # but we have no way to accurately tell if it did. So we wait for + # _lost_worker_timeout seconds before we mark the job with + # WorkerLostError. 
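+        # That is: a job whose worker disappeared is only failed with
+        # WorkerLostError once monotonic() - lost_time exceeds the job's
+        # _lost_worker_timeout, which gives a late READY message a chance to
+        # arrive first.  The exit codes collected below are returned to
+        # _repopulate_pool(), where anything other than EX_OK/EX_RECYCLE
+        # counts against the restart-frequency limit.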
+ for job in [job for job in list(self._cache.values()) + if not job.ready() and job._worker_lost]: + now = now or monotonic() + lost_time, lost_ret = job._worker_lost + if now - lost_time > job._lost_worker_timeout: + self.mark_as_worker_lost(job, lost_ret) + + if shutdown and not len(self._pool): + raise WorkersJoined() + + cleaned, exitcodes = {}, {} + for i in reversed(range(len(self._pool))): + worker = self._pool[i] + exitcode = worker.exitcode + popen = worker._popen + if popen is None or exitcode is not None: + # worker exited + debug('Supervisor: cleaning up worker %d', i) + if popen is not None: + worker.join() + debug('Supervisor: worked %d joined', i) + cleaned[worker.pid] = worker + exitcodes[worker.pid] = exitcode + if exitcode not in (EX_OK, EX_RECYCLE) and \ + not getattr(worker, '_controlled_termination', False): + error( + 'Process %r pid:%r exited with %r', + worker.name, worker.pid, human_status(exitcode), + exc_info=0, + ) + self.process_flush_queues(worker) + del self._pool[i] + del self._poolctrl[worker.pid] + if cleaned: + all_pids = [w.pid for w in self._pool] + for job in list(self._cache.values()): + acked_by_gone = next( + (pid for pid in job.worker_pids() + if pid in cleaned or pid not in all_pids), + None + ) + # already accepted by process + if acked_by_gone: + self.on_job_process_down(job, acked_by_gone) + if not job.ready(): + exitcode = exitcodes.get(acked_by_gone) or 0 + proc = cleaned.get(acked_by_gone) + if proc and getattr(proc, '_job_terminated', False): + job._set_terminated(exitcode) + else: + self.on_job_process_lost( + job, acked_by_gone, exitcode, + ) + else: + # started writing to + write_to = job._write_to + # was scheduled to write to + sched_for = job._scheduled_for + + if write_to and not write_to._is_alive(): + self.on_job_process_down(job, write_to.pid) + elif sched_for and not sched_for._is_alive(): + self.on_job_process_down(job, sched_for.pid) + + for worker in values(cleaned): + if self.on_process_down: + if not shutdown: + self._process_cleanup_queues(worker) + self.on_process_down(worker) + return list(exitcodes.values()) + return [] + + def on_partial_read(self, job, worker): + pass + + def _process_cleanup_queues(self, worker): + pass + + def on_job_process_down(self, job, pid_gone): + pass + + def on_job_process_lost(self, job, pid, exitcode): + job._worker_lost = (monotonic(), exitcode) + + def mark_as_worker_lost(self, job, exitcode): + try: + raise WorkerLostError( + 'Worker exited prematurely: {0}.'.format( + human_status(exitcode)), + ) + except WorkerLostError: + job._set(None, (False, ExceptionInfo())) + else: # pragma: no cover + pass + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + return self.terminate() + + def on_grow(self, n): + pass + + def on_shrink(self, n): + pass + + def shrink(self, n=1): + for i, worker in enumerate(self._iterinactive()): + self._processes -= 1 + if self._putlock: + self._putlock.shrink() + worker.terminate_controlled() + self.on_shrink(1) + if i >= n - 1: + break + else: + raise ValueError("Can't shrink pool. 
All processes busy!") + + def grow(self, n=1): + for i in range(n): + self._processes += 1 + if self._putlock: + self._putlock.grow() + self.on_grow(n) + + def _iterinactive(self): + for worker in self._pool: + if not self._worker_active(worker): + yield worker + raise StopIteration() + + def _worker_active(self, worker): + for job in values(self._cache): + if worker.pid in job.worker_pids(): + return True + return False + + def _repopulate_pool(self, exitcodes): + """Bring the number of pool processes up to the specified number, + for use after reaping workers which have exited. + """ + for i in range(self._processes - len(self._pool)): + if self._state != RUN: + return + try: + if exitcodes and exitcodes[i] not in (EX_OK, EX_RECYCLE): + self.restart_state.step() + except IndexError: + self.restart_state.step() + self._create_worker_process(self._avail_index()) + debug('added worker') + + def _avail_index(self): + assert len(self._pool) < self._processes + indices = set(p.index for p in self._pool) + return next(i for i in range(self._processes) if i not in indices) + + def did_start_ok(self): + return not self._join_exited_workers() + + def _maintain_pool(self): + """"Clean up any exited workers and start replacements for them. + """ + joined = self._join_exited_workers() + self._repopulate_pool(joined) + for i in range(len(joined)): + if self._putlock is not None: + self._putlock.release() + + def maintain_pool(self): + if self._worker_handler._state == RUN and self._state == RUN: + try: + self._maintain_pool() + except RestartFreqExceeded: + self.close() + self.join() + raise + except OSError as exc: + if get_errno(exc) == errno.ENOMEM: + reraise(MemoryError, + MemoryError(str(exc)), + sys.exc_info()[2]) + raise + + def _setup_queues(self): + from billiard.queues import SimpleQueue + self._inqueue = SimpleQueue() + self._outqueue = SimpleQueue() + self._quick_put = self._inqueue._writer.send + self._quick_get = self._outqueue._reader.recv + + def _poll_result(timeout): + if self._outqueue._reader.poll(timeout): + return True, self._quick_get() + return False, None + self._poll_result = _poll_result + + def _start_timeout_handler(self): + # ensure more than one thread does not start the timeout handler + # thread at once. + if self.threads: + with self._timeout_handler_mutex: + if not self._timeout_handler_started: + self._timeout_handler_started = True + self._timeout_handler.start() + + def apply(self, func, args=(), kwds={}): + ''' + Equivalent of `func(*args, **kwargs)`. + ''' + if self._state == RUN: + return self.apply_async(func, args, kwds).get() + + def starmap(self, func, iterable, chunksize=None): + ''' + Like `map()` method but the elements of the `iterable` are expected to + be iterables as well and will be unpacked as arguments. Hence + `func` and (a, b) becomes func(a, b). + ''' + if self._state == RUN: + return self._map_async(func, iterable, + starmapstar, chunksize).get() + + def starmap_async(self, func, iterable, chunksize=None, + callback=None, error_callback=None): + ''' + Asynchronous version of `starmap()` method. + ''' + if self._state == RUN: + return self._map_async(func, iterable, starmapstar, chunksize, + callback, error_callback) + + def map(self, func, iterable, chunksize=None): + ''' + Apply `func` to each element in `iterable`, collecting the results + in a list that is returned. 
+ ''' + if self._state == RUN: + return self.map_async(func, iterable, chunksize).get() + + def imap(self, func, iterable, chunksize=1, lost_worker_timeout=None): + ''' + Equivalent of `map()` -- can be MUCH slower than `Pool.map()`. + ''' + if self._state != RUN: + return + lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout + if chunksize == 1: + result = IMapIterator(self._cache, + lost_worker_timeout=lost_worker_timeout) + self._taskqueue.put(( + ((TASK, (result._job, i, func, (x,), {})) + for i, x in enumerate(iterable)), + result._set_length, + )) + return result + else: + assert chunksize > 1 + task_batches = Pool._get_tasks(func, iterable, chunksize) + result = IMapIterator(self._cache, + lost_worker_timeout=lost_worker_timeout) + self._taskqueue.put(( + ((TASK, (result._job, i, mapstar, (x,), {})) + for i, x in enumerate(task_batches)), + result._set_length, + )) + return (item for chunk in result for item in chunk) + + def imap_unordered(self, func, iterable, chunksize=1, + lost_worker_timeout=None): + ''' + Like `imap()` method but ordering of results is arbitrary. + ''' + if self._state != RUN: + return + lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout + if chunksize == 1: + result = IMapUnorderedIterator( + self._cache, lost_worker_timeout=lost_worker_timeout, + ) + self._taskqueue.put(( + ((TASK, (result._job, i, func, (x,), {})) + for i, x in enumerate(iterable)), + result._set_length, + )) + return result + else: + assert chunksize > 1 + task_batches = Pool._get_tasks(func, iterable, chunksize) + result = IMapUnorderedIterator( + self._cache, lost_worker_timeout=lost_worker_timeout, + ) + self._taskqueue.put(( + ((TASK, (result._job, i, mapstar, (x,), {})) + for i, x in enumerate(task_batches)), + result._set_length, + )) + return (item for chunk in result for item in chunk) + + def apply_async(self, func, args=(), kwds={}, + callback=None, error_callback=None, accept_callback=None, + timeout_callback=None, waitforslot=None, + soft_timeout=None, timeout=None, lost_worker_timeout=None, + callbacks_propagate=(), + correlation_id=None): + ''' + Asynchronous equivalent of `apply()` method. + + Callback is called when the functions return value is ready. + The accept callback is called when the job is accepted to be executed. + + Simplified the flow is like this: + + >>> def apply_async(func, args, kwds, callback, accept_callback): + ... if accept_callback: + ... accept_callback() + ... retval = func(*args, **kwds) + ... if callback: + ... 
callback(retval) + + ''' + if self._state != RUN: + return + soft_timeout = soft_timeout or self.soft_timeout + timeout = timeout or self.timeout + lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout + if soft_timeout and SIG_SOFT_TIMEOUT is None: + warnings.warn(UserWarning( + "Soft timeouts are not supported: " + "on this platform: It does not have the SIGUSR1 signal.", + )) + soft_timeout = None + if self._state == RUN: + waitforslot = self.putlocks if waitforslot is None else waitforslot + if waitforslot and self._putlock is not None: + self._putlock.acquire() + result = ApplyResult( + self._cache, callback, accept_callback, timeout_callback, + error_callback, soft_timeout, timeout, lost_worker_timeout, + on_timeout_set=self.on_timeout_set, + on_timeout_cancel=self.on_timeout_cancel, + callbacks_propagate=callbacks_propagate, + send_ack=self.send_ack if self.synack else None, + correlation_id=correlation_id, + ) + if timeout or soft_timeout: + # start the timeout handler thread when required. + self._start_timeout_handler() + if self.threads: + self._taskqueue.put(([(TASK, (result._job, None, + func, args, kwds))], None)) + else: + self._quick_put((TASK, (result._job, None, func, args, kwds))) + return result + + def send_ack(self, response, job, i, fd): + pass + + def terminate_job(self, pid, sig=None): + proc, _ = self._process_by_pid(pid) + if proc is not None: + try: + _kill(pid, sig or signal.SIGTERM) + except OSError as exc: + if get_errno(exc) != errno.ESRCH: + raise + else: + proc._controlled_termination = True + proc._job_terminated = True + + def map_async(self, func, iterable, chunksize=None, + callback=None, error_callback=None): + ''' + Asynchronous equivalent of `map()` method. + ''' + return self._map_async( + func, iterable, mapstar, chunksize, callback, error_callback, + ) + + def _map_async(self, func, iterable, mapper, chunksize=None, + callback=None, error_callback=None): + ''' + Helper function to implement map, starmap and their async counterparts. 
+ ''' + if self._state != RUN: + return + if not hasattr(iterable, '__len__'): + iterable = list(iterable) + + if chunksize is None: + chunksize, extra = divmod(len(iterable), len(self._pool) * 4) + if extra: + chunksize += 1 + if len(iterable) == 0: + chunksize = 0 + + task_batches = Pool._get_tasks(func, iterable, chunksize) + result = MapResult(self._cache, chunksize, len(iterable), callback, + error_callback=error_callback) + self._taskqueue.put((((TASK, (result._job, i, mapper, (x,), {})) + for i, x in enumerate(task_batches)), None)) + return result + + @staticmethod + def _get_tasks(func, it, size): + it = iter(it) + while 1: + x = tuple(itertools.islice(it, size)) + if not x: + return + yield (func, x) + + def __reduce__(self): + raise NotImplementedError( + 'pool objects cannot be passed between processes or pickled', + ) + + def close(self): + debug('closing pool') + if self._state == RUN: + self._state = CLOSE + if self._putlock: + self._putlock.clear() + self._worker_handler.close() + self._taskqueue.put(None) + stop_if_not_current(self._worker_handler) + + def terminate(self): + debug('terminating pool') + self._state = TERMINATE + self._worker_handler.terminate() + self._terminate() + + @staticmethod + def _stop_task_handler(task_handler): + stop_if_not_current(task_handler) + + def join(self): + assert self._state in (CLOSE, TERMINATE) + debug('joining worker handler') + stop_if_not_current(self._worker_handler) + debug('joining task handler') + self._stop_task_handler(self._task_handler) + debug('joining result handler') + stop_if_not_current(self._result_handler) + debug('result handler joined') + for i, p in enumerate(self._pool): + debug('joining worker %s/%s (%r)', i+1, len(self._pool), p) + if p._popen is not None: # process started? 
+ p.join() + debug('pool join complete') + + def restart(self): + for e in values(self._poolctrl): + e.set() + + @staticmethod + def _help_stuff_finish(inqueue, task_handler, _pool): + # task_handler may be blocked trying to put items on inqueue + debug('removing tasks from inqueue until task handler finished') + inqueue._rlock.acquire() + while task_handler.is_alive() and inqueue._reader.poll(): + inqueue._reader.recv() + time.sleep(0) + + @classmethod + def _set_result_sentinel(cls, outqueue, pool): + outqueue.put(None) + + @classmethod + def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, + worker_handler, task_handler, + result_handler, cache, timeout_handler, + help_stuff_finish_args): + + # this is guaranteed to only be called once + debug('finalizing pool') + + worker_handler.terminate() + + task_handler.terminate() + taskqueue.put(None) # sentinel + + debug('helping task handler/workers to finish') + cls._help_stuff_finish(*help_stuff_finish_args) + + result_handler.terminate() + cls._set_result_sentinel(outqueue, pool) + + if timeout_handler is not None: + timeout_handler.terminate() + + # Terminate workers which haven't already finished + if pool and hasattr(pool[0], 'terminate'): + debug('terminating workers') + for p in pool: + if p._is_alive(): + p.terminate() + + debug('joining task handler') + cls._stop_task_handler(task_handler) + + debug('joining result handler') + result_handler.stop() + + if timeout_handler is not None: + debug('joining timeout handler') + timeout_handler.stop(TIMEOUT_MAX) + + if pool and hasattr(pool[0], 'terminate'): + debug('joining pool workers') + for p in pool: + if p.is_alive(): + # worker has not yet exited + debug('cleaning up worker %d', p.pid) + if p._popen is not None: + p.join() + debug('pool workers joined') + + @property + def process_sentinels(self): + return [w._popen.sentinel for w in self._pool] + +# +# Class whose instances are returned by `Pool.apply_async()` +# + + +class ApplyResult(object): + _worker_lost = None + _write_to = None + _scheduled_for = None + + def __init__(self, cache, callback, accept_callback=None, + timeout_callback=None, error_callback=None, soft_timeout=None, + timeout=None, lost_worker_timeout=LOST_WORKER_TIMEOUT, + on_timeout_set=None, on_timeout_cancel=None, + callbacks_propagate=(), send_ack=None, + correlation_id=None): + self.correlation_id = correlation_id + self._mutex = Lock() + self._event = threading.Event() + self._job = next(job_counter) + self._cache = cache + self._callback = callback + self._accept_callback = accept_callback + self._error_callback = error_callback + self._timeout_callback = timeout_callback + self._timeout = timeout + self._soft_timeout = soft_timeout + self._lost_worker_timeout = lost_worker_timeout + self._on_timeout_set = on_timeout_set + self._on_timeout_cancel = on_timeout_cancel + self._callbacks_propagate = callbacks_propagate or () + self._send_ack = send_ack + + self._accepted = False + self._cancelled = False + self._worker_pid = None + self._time_accepted = None + self._terminated = None + cache[self._job] = self + + def __repr__(self): + return ''.format( + id=self._job, ack=self._accepted, ready=self.ready(), + ) + + def ready(self): + return self._event.isSet() + + def accepted(self): + return self._accepted + + def successful(self): + assert self.ready() + return self._success + + def _cancel(self): + """Only works if synack is used.""" + self._cancelled = True + + def discard(self): + self._cache.pop(self._job, None) + + def terminate(self, signum): + 
self._terminated = signum + + def _set_terminated(self, signum=None): + try: + raise Terminated(-(signum or 0)) + except Terminated: + self._set(None, (False, ExceptionInfo())) + + def worker_pids(self): + return [self._worker_pid] if self._worker_pid else [] + + def wait(self, timeout=None): + self._event.wait(timeout) + + def get(self, timeout=None): + self.wait(timeout) + if not self.ready(): + raise TimeoutError + if self._success: + return self._value + else: + raise self._value.exception + + def safe_apply_callback(self, fun, *args, **kwargs): + if fun: + try: + fun(*args, **kwargs) + except self._callbacks_propagate: + raise + except Exception as exc: + error('Pool callback raised exception: %r', exc, + exc_info=1) + + def handle_timeout(self, soft=False): + if self._timeout_callback is not None: + self.safe_apply_callback( + self._timeout_callback, soft=soft, + timeout=self._soft_timeout if soft else self._timeout, + ) + + def _set(self, i, obj): + with self._mutex: + if self._on_timeout_cancel: + self._on_timeout_cancel(self) + self._success, self._value = obj + self._event.set() + if self._accepted: + # if not accepted yet, then the set message + # was received before the ack, which means + # the ack will remove the entry. + self._cache.pop(self._job, None) + + # apply callbacks last + if self._callback and self._success: + self.safe_apply_callback( + self._callback, self._value) + if (self._value is not None and + self._error_callback and not self._success): + self.safe_apply_callback( + self._error_callback, self._value) + + def _ack(self, i, time_accepted, pid, synqW_fd): + with self._mutex: + if self._cancelled and self._send_ack: + self._accepted = True + if synqW_fd: + return self._send_ack(NACK, pid, self._job, synqW_fd) + return + self._accepted = True + self._time_accepted = time_accepted + self._worker_pid = pid + if self.ready(): + # ack received after set() + self._cache.pop(self._job, None) + if self._on_timeout_set: + self._on_timeout_set(self, self._soft_timeout, self._timeout) + response = ACK + if self._accept_callback: + try: + self._accept_callback(pid, time_accepted) + except self._propagate_errors: + response = NACK + raise + except Exception: + response = NACK + # ignore other errors + finally: + if self._send_ack and synqW_fd: + return self._send_ack( + response, pid, self._job, synqW_fd + ) + if self._send_ack and synqW_fd: + self._send_ack(response, pid, self._job, synqW_fd) + +# +# Class whose instances are returned by `Pool.map_async()` +# + + +class MapResult(ApplyResult): + + def __init__(self, cache, chunksize, length, callback, error_callback): + ApplyResult.__init__( + self, cache, callback, error_callback=error_callback, + ) + self._success = True + self._length = length + self._value = [None] * length + self._accepted = [False] * length + self._worker_pid = [None] * length + self._time_accepted = [None] * length + self._chunksize = chunksize + if chunksize <= 0: + self._number_left = 0 + self._event.set() + del cache[self._job] + else: + self._number_left = length // chunksize + bool(length % chunksize) + + def _set(self, i, success_result): + success, result = success_result + if success: + self._value[i * self._chunksize:(i + 1) * self._chunksize] = result + self._number_left -= 1 + if self._number_left == 0: + if self._callback: + self._callback(self._value) + if self._accepted: + self._cache.pop(self._job, None) + self._event.set() + else: + self._success = False + self._value = result + if self._error_callback: + 
self._error_callback(self._value) + if self._accepted: + self._cache.pop(self._job, None) + self._event.set() + + def _ack(self, i, time_accepted, pid, *args): + start = i * self._chunksize + stop = min((i + 1) * self._chunksize, self._length) + for j in range(start, stop): + self._accepted[j] = True + self._worker_pid[j] = pid + self._time_accepted[j] = time_accepted + if self.ready(): + self._cache.pop(self._job, None) + + def accepted(self): + return all(self._accepted) + + def worker_pids(self): + return [pid for pid in self._worker_pid if pid] + +# +# Class whose instances are returned by `Pool.imap()` +# + + +class IMapIterator(object): + _worker_lost = None + + def __init__(self, cache, lost_worker_timeout=LOST_WORKER_TIMEOUT): + self._cond = threading.Condition(threading.Lock()) + self._job = next(job_counter) + self._cache = cache + self._items = deque() + self._index = 0 + self._length = None + self._ready = False + self._unsorted = {} + self._worker_pids = [] + self._lost_worker_timeout = lost_worker_timeout + cache[self._job] = self + + def __iter__(self): + return self + + def next(self, timeout=None): + with self._cond: + try: + item = self._items.popleft() + except IndexError: + if self._index == self._length: + self._ready = True + raise StopIteration + self._cond.wait(timeout) + try: + item = self._items.popleft() + except IndexError: + if self._index == self._length: + self._ready = True + raise StopIteration + raise TimeoutError + + success, value = item + if success: + return value + raise Exception(value) + + __next__ = next # XXX + + def _set(self, i, obj): + with self._cond: + if self._index == i: + self._items.append(obj) + self._index += 1 + while self._index in self._unsorted: + obj = self._unsorted.pop(self._index) + self._items.append(obj) + self._index += 1 + self._cond.notify() + else: + self._unsorted[i] = obj + + if self._index == self._length: + self._ready = True + del self._cache[self._job] + + def _set_length(self, length): + with self._cond: + self._length = length + if self._index == self._length: + self._ready = True + self._cond.notify() + del self._cache[self._job] + + def _ack(self, i, time_accepted, pid, *args): + self._worker_pids.append(pid) + + def ready(self): + return self._ready + + def worker_pids(self): + return self._worker_pids + +# +# Class whose instances are returned by `Pool.imap_unordered()` +# + + +class IMapUnorderedIterator(IMapIterator): + + def _set(self, i, obj): + with self._cond: + self._items.append(obj) + self._index += 1 + self._cond.notify() + if self._index == self._length: + self._ready = True + del self._cache[self._job] + +# +# +# + + +class ThreadPool(Pool): + + from billiard.dummy import Process as DummyProcess + Process = DummyProcess + + def __init__(self, processes=None, initializer=None, initargs=()): + Pool.__init__(self, processes, initializer, initargs) + + def _setup_queues(self): + self._inqueue = Queue() + self._outqueue = Queue() + self._quick_put = self._inqueue.put + self._quick_get = self._outqueue.get + + def _poll_result(timeout): + try: + return True, self._quick_get(timeout=timeout) + except Empty: + return False, None + self._poll_result = _poll_result + + @staticmethod + def _help_stuff_finish(inqueue, task_handler, pool): + # put sentinels at head of inqueue to make workers finish + with inqueue.not_empty: + inqueue.queue.clear() + inqueue.queue.extend([None] * len(pool)) + inqueue.not_empty.notify_all() diff --git a/thesisenv/lib/python3.6/site-packages/billiard/process.py 
b/thesisenv/lib/python3.6/site-packages/billiard/process.py new file mode 100644 index 0000000..a6080ff --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/process.py @@ -0,0 +1,368 @@ +# +# Module providing the `Process` class which emulates `threading.Thread` +# +# multiprocessing/process.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# +from __future__ import absolute_import + +# +# Imports +# + +import os +import sys +import signal +import itertools +import binascii +import logging +import threading + +from multiprocessing import process as _mproc + +from .compat import bytes +try: + from _weakrefset import WeakSet +except ImportError: + WeakSet = None # noqa +from .five import items, string_t + +try: + ORIGINAL_DIR = os.path.abspath(os.getcwd()) +except OSError: + ORIGINAL_DIR = None + +__all__ = ['Process', 'current_process', 'active_children'] + +# +# Public functions +# + + +def current_process(): + ''' + Return process object representing the current process + ''' + return _current_process + + +def _set_current_process(process): + global _current_process + _current_process = _mproc._current_process = process + + +def _cleanup(): + # check for processes which have finished + if _current_process is not None: + for p in list(_current_process._children): + if p._popen.poll() is not None: + _current_process._children.discard(p) + + +def _maybe_flush(f): + try: + f.flush() + except (AttributeError, EnvironmentError, NotImplementedError): + pass + + +def active_children(_cleanup=_cleanup): + ''' + Return list of process objects corresponding to live child processes + ''' + try: + _cleanup() + except TypeError: + # called after gc collect so _cleanup does not exist anymore + return [] + if _current_process is not None: + return list(_current_process._children) + return [] + + +class Process(object): + ''' + Process objects represent activity that is run in a separate process + + The class is analagous to `threading.Thread` + ''' + _Popen = None + + def __init__(self, group=None, target=None, name=None, + args=(), kwargs={}, daemon=None, **_kw): + assert group is None, 'group argument must be None for now' + count = next(_current_process._counter) + self._identity = _current_process._identity + (count,) + self._authkey = _current_process._authkey + if daemon is not None: + self._daemonic = daemon + else: + self._daemonic = _current_process._daemonic + self._tempdir = _current_process._tempdir + self._semprefix = _current_process._semprefix + self._unlinkfd = _current_process._unlinkfd + self._parent_pid = os.getpid() + self._popen = None + self._target = target + self._args = tuple(args) + self._kwargs = dict(kwargs) + self._name = ( + name or type(self).__name__ + '-' + + ':'.join(str(i) for i in self._identity) + ) + if _dangling is not None: + _dangling.add(self) + + def run(self): + ''' + Method to be run in sub-process; can be overridden in sub-class + ''' + if self._target: + self._target(*self._args, **self._kwargs) + + def start(self): + ''' + Start child process + ''' + assert self._popen is None, 'cannot start a process twice' + assert self._parent_pid == os.getpid(), \ + 'can only start a process object created by current process' + _cleanup() + if self._Popen is not None: + Popen = self._Popen + else: + from .forking import Popen + self._popen = Popen(self) + self._sentinel = self._popen.sentinel + _current_process._children.add(self) + + def terminate(self): + ''' + Terminate process; sends SIGTERM signal or uses 
TerminateProcess() + ''' + self._popen.terminate() + + def join(self, timeout=None): + ''' + Wait until child process terminates + ''' + assert self._parent_pid == os.getpid(), 'can only join a child process' + assert self._popen is not None, 'can only join a started process' + res = self._popen.wait(timeout) + if res is not None: + _current_process._children.discard(self) + + def is_alive(self): + ''' + Return whether process is alive + ''' + if self is _current_process: + return True + assert self._parent_pid == os.getpid(), 'can only test a child process' + if self._popen is None: + return False + self._popen.poll() + return self._popen.returncode is None + + def _is_alive(self): + if self._popen is None: + return False + return self._popen.poll() is None + + def _get_name(self): + return self._name + + def _set_name(self, value): + assert isinstance(name, string_t), 'name must be a string' + self._name = value + name = property(_get_name, _set_name) + + def _get_daemon(self): + return self._daemonic + + def _set_daemon(self, daemonic): + assert self._popen is None, 'process has already started' + self._daemonic = daemonic + daemon = property(_get_daemon, _set_daemon) + + def _get_authkey(self): + return self._authkey + + def _set_authkey(self, authkey): + self._authkey = AuthenticationString(authkey) + authkey = property(_get_authkey, _set_authkey) + + @property + def exitcode(self): + ''' + Return exit code of process or `None` if it has yet to stop + ''' + if self._popen is None: + return self._popen + return self._popen.poll() + + @property + def ident(self): + ''' + Return identifier (PID) of process or `None` if it has yet to start + ''' + if self is _current_process: + return os.getpid() + else: + return self._popen and self._popen.pid + + pid = ident + + @property + def sentinel(self): + ''' + Return a file descriptor (Unix) or handle (Windows) suitable for + waiting for process termination. + ''' + try: + return self._sentinel + except AttributeError: + raise ValueError("process not started") + + def __repr__(self): + if self is _current_process: + status = 'started' + elif self._parent_pid != os.getpid(): + status = 'unknown' + elif self._popen is None: + status = 'initial' + else: + if self._popen.poll() is not None: + status = self.exitcode + else: + status = 'started' + + if type(status) is int: + if status == 0: + status = 'stopped' + else: + status = 'stopped[%s]' % _exitcode_to_name.get(status, status) + + return '<%s(%s, %s%s)>' % (type(self).__name__, self._name, + status, self._daemonic and ' daemon' or '') + + ## + + def _bootstrap(self): + from . import util + global _current_process + + try: + self._children = set() + self._counter = itertools.count(1) + if sys.stdin is not None: + try: + sys.stdin.close() + sys.stdin = open(os.devnull) + except (OSError, ValueError): + pass + old_process = _current_process + _set_current_process(self) + + # Re-init logging system. + # Workaround for http://bugs.python.org/issue6721/#msg140215 + # Python logging module uses RLock() objects which are broken + # after fork. This can result in a deadlock (Celery Issue #496). 
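+            # The loop below asks every known logger handler to re-create its
+            # lock via handler.createLock(): an RLock inherited across fork()
+            # may still appear to be held by a parent thread that no longer
+            # exists in the child, which is what produces the deadlock
+            # referenced above.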
+ loggerDict = logging.Logger.manager.loggerDict + logger_names = list(loggerDict.keys()) + logger_names.append(None) # for root logger + for name in logger_names: + if not name or not isinstance(loggerDict[name], + logging.PlaceHolder): + for handler in logging.getLogger(name).handlers: + handler.createLock() + logging._lock = threading.RLock() + + try: + util._finalizer_registry.clear() + util._run_after_forkers() + finally: + # delay finalization of the old process object until after + # _run_after_forkers() is executed + del old_process + util.info('child process %s calling self.run()', self.pid) + try: + self.run() + exitcode = 0 + finally: + util._exit_function() + except SystemExit as exc: + if not exc.args: + exitcode = 1 + elif isinstance(exc.args[0], int): + exitcode = exc.args[0] + else: + sys.stderr.write(str(exc.args[0]) + '\n') + _maybe_flush(sys.stderr) + exitcode = 0 if isinstance(exc.args[0], str) else 1 + except: + exitcode = 1 + if not util.error('Process %s', self.name, exc_info=True): + import traceback + sys.stderr.write('Process %s:\n' % self.name) + traceback.print_exc() + finally: + util.info('process %s exiting with exitcode %d', + self.pid, exitcode) + _maybe_flush(sys.stdout) + _maybe_flush(sys.stderr) + return exitcode + +# +# We subclass bytes to avoid accidental transmission of auth keys over network +# + + +class AuthenticationString(bytes): + + def __reduce__(self): + from .forking import Popen + + if not Popen.thread_is_spawning(): + raise TypeError( + 'Pickling an AuthenticationString object is ' + 'disallowed for security reasons') + return AuthenticationString, (bytes(self),) + +# +# Create object representing the main process +# + + +class _MainProcess(Process): + + def __init__(self): + self._identity = () + self._daemonic = False + self._name = 'MainProcess' + self._parent_pid = None + self._popen = None + self._counter = itertools.count(1) + self._children = set() + self._authkey = AuthenticationString(os.urandom(32)) + self._tempdir = None + self._semprefix = 'mp-' + binascii.hexlify( + os.urandom(4)).decode('ascii') + self._unlinkfd = None + +_current_process = _MainProcess() +del _MainProcess + +# +# Give names to some return codes +# + +_exitcode_to_name = {} + +for name, signum in items(signal.__dict__): + if name[:3] == 'SIG' and '_' not in name: + _exitcode_to_name[-signum] = name + +_dangling = WeakSet() if WeakSet is not None else None diff --git a/thesisenv/lib/python3.6/site-packages/billiard/py3/__init__.py b/thesisenv/lib/python3.6/site-packages/billiard/py3/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/billiard/py3/connection.py b/thesisenv/lib/python3.6/site-packages/billiard/py3/connection.py new file mode 100644 index 0000000..bfce702 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/py3/connection.py @@ -0,0 +1,965 @@ +# +# A higher level module for using sockets (or Windows named pipes) +# +# multiprocessing/connection.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. 
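+# A hedged usage sketch of the Pipe()/Connection API defined in this module
+# (assuming it is importable as billiard.py3.connection from this virtualenv;
+# on Unix the default duplex Pipe() returns two socket-backed Connections):
+#
+#   >>> from billiard.py3.connection import Pipe
+#   >>> parent, child = Pipe()
+#   >>> child.send({'msg': 'hello'})
+#   >>> parent.recv()
+#   {'msg': 'hello'}
+#   >>> parent.close(); child.close()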
+# +from __future__ import absolute_import + +import io +import os +import sys +import select +import socket +import struct +import errno +import tempfile +import itertools + +import _multiprocessing +from ..compat import setblocking +from ..exceptions import AuthenticationError, BufferTooShort +from ..five import monotonic +from ..util import get_temp_dir, Finalize, sub_debug +from ..reduction import ForkingPickler + +try: + import _winapi + + WAIT_OBJECT_0 = _winapi.WAIT_OBJECT_0 + WAIT_TIMEOUT = _winapi.WAIT_TIMEOUT + INFINITE = _winapi.INFINITE + # if we got here, we seem to be running on Windows. Handle probably + # missing WAIT_ABANDONED_0 constant: + try: + WAIT_ABANDONED_0 = _winapi.WAIT_ABANDONED_0 + except ImportError: + # _winapi seems to be not exporting + # this constant, fallback solution until + # exported in _winapio + WAIT_ABANDONED_0 = 128 +except ImportError: + if sys.platform == 'win32': + raise + _winapi = None + +__all__ = ['Client', 'Listener', 'Pipe', 'wait'] + +# +# +# + +BUFSIZE = 8192 +# A very generous timeout when it comes to local connections... +CONNECTION_TIMEOUT = 20. + +_mmap_counter = itertools.count() + +default_family = 'AF_INET' +families = ['AF_INET'] + +if hasattr(socket, 'AF_UNIX'): + default_family = 'AF_UNIX' + families += ['AF_UNIX'] + +if sys.platform == 'win32': + default_family = 'AF_PIPE' + families += ['AF_PIPE'] + + +def _init_timeout(timeout=CONNECTION_TIMEOUT): + return monotonic() + timeout + + +def _check_timeout(t): + return monotonic() > t + + +def arbitrary_address(family): + ''' + Return an arbitrary free address for the given family + ''' + if family == 'AF_INET': + return ('localhost', 0) + elif family == 'AF_UNIX': + return tempfile.mktemp(prefix='listener-', dir=get_temp_dir()) + elif family == 'AF_PIPE': + return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' % + (os.getpid(), next(_mmap_counter))) + else: + raise ValueError('unrecognized family') + + +def _validate_family(family): + ''' + Checks if the family is valid for the current environment. + ''' + if sys.platform != 'win32' and family == 'AF_PIPE': + raise ValueError('Family %s is not recognized.' % family) + + if sys.platform == 'win32' and family == 'AF_UNIX': + # double check + if not hasattr(socket, family): + raise ValueError('Family %s is not recognized.' % family) + + +def address_type(address): + ''' + Return the types of the address + + This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE' + ''' + if type(address) == tuple: + return 'AF_INET' + elif type(address) is str and address.startswith('\\\\'): + return 'AF_PIPE' + elif type(address) is str: + return 'AF_UNIX' + else: + raise ValueError('address type of %r unrecognized' % address) + +# +# Connection classes +# + + +class _ConnectionBase: + _handle = None + + def __init__(self, handle, readable=True, writable=True): + handle = handle.__index__() + if handle < 0: + raise ValueError("invalid handle") + if not readable and not writable: + raise ValueError( + "at least one of `readable` and `writable` must be True") + self._handle = handle + self._readable = readable + self._writable = writable + + # XXX should we use util.Finalize instead of a __del__? 
+ + def __del__(self): + if self._handle is not None: + self._close() + + def _check_closed(self): + if self._handle is None: + raise OSError("handle is closed") + + def _check_readable(self): + if not self._readable: + raise OSError("connection is write-only") + + def _check_writable(self): + if not self._writable: + raise OSError("connection is read-only") + + def _bad_message_length(self): + if self._writable: + self._readable = False + else: + self.close() + raise OSError("bad message length") + + @property + def closed(self): + """True if the connection is closed""" + return self._handle is None + + @property + def readable(self): + """True if the connection is readable""" + return self._readable + + @property + def writable(self): + """True if the connection is writable""" + return self._writable + + def fileno(self): + """File descriptor or handle of the connection""" + self._check_closed() + return self._handle + + def close(self): + """Close the connection""" + if self._handle is not None: + try: + self._close() + finally: + self._handle = None + + def send_bytes(self, buf, offset=0, size=None): + """Send the bytes data from a bytes-like object""" + self._check_closed() + self._check_writable() + m = memoryview(buf) + # HACK for byte-indexing of non-bytewise buffers (e.g. array.array) + if m.itemsize > 1: + m = memoryview(bytes(m)) + n = len(m) + if offset < 0: + raise ValueError("offset is negative") + if n < offset: + raise ValueError("buffer length < offset") + if size is None: + size = n - offset + elif size < 0: + raise ValueError("size is negative") + elif offset + size > n: + raise ValueError("buffer length < offset + size") + self._send_bytes(m[offset:offset + size]) + + def send(self, obj): + """Send a (picklable) object""" + self._check_closed() + self._check_writable() + self._send_bytes(ForkingPickler.dumps(obj)) + + def recv_bytes(self, maxlength=None): + """ + Receive bytes data as a bytes object. + """ + self._check_closed() + self._check_readable() + if maxlength is not None and maxlength < 0: + raise ValueError("negative maxlength") + buf = self._recv_bytes(maxlength) + if buf is None: + self._bad_message_length() + return buf.getvalue() + + def recv_bytes_into(self, buf, offset=0): + """ + Receive bytes data into a writeable buffer-like object. + Return the number of bytes read. + """ + self._check_closed() + self._check_readable() + with memoryview(buf) as m: + # Get bytesize of arbitrary buffer + itemsize = m.itemsize + bytesize = itemsize * len(m) + if offset < 0: + raise ValueError("negative offset") + elif offset > bytesize: + raise ValueError("offset too large") + result = self._recv_bytes() + size = result.tell() + if bytesize < offset + size: + raise BufferTooShort(result.getvalue()) + # Message can fit in dest + result.seek(0) + result.readinto( + m[offset // itemsize:(offset + size) // itemsize] + ) + return size + + def recv_payload(self): + return self._recv_bytes().getbuffer() + + def recv(self): + """Receive a (picklable) object""" + self._check_closed() + self._check_readable() + buf = self._recv_bytes() + return ForkingPickler.loads(buf.getbuffer()) + + def poll(self, timeout=0.0): + """Whether there is any input available to be read""" + self._check_closed() + self._check_readable() + return self._poll(timeout) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + +if _winapi: + + class PipeConnection(_ConnectionBase): + """ + Connection class based on a Windows named pipe. 
+ Overlapped I/O is used, so the handles must have been created + with FILE_FLAG_OVERLAPPED. + """ + _got_empty_message = False + + def _close(self, _CloseHandle=_winapi.CloseHandle): + _CloseHandle(self._handle) + + def _send_bytes(self, buf): + ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True) + try: + if err == _winapi.ERROR_IO_PENDING: + waitres = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + assert waitres == WAIT_OBJECT_0 + except: + ov.cancel() + raise + finally: + nwritten, err = ov.GetOverlappedResult(True) + assert err == 0 + assert nwritten == len(buf) + + def _recv_bytes(self, maxsize=None): + if self._got_empty_message: + self._got_empty_message = False + return io.BytesIO() + else: + bsize = 128 if maxsize is None else min(maxsize, 128) + try: + ov, err = _winapi.ReadFile(self._handle, bsize, + overlapped=True) + try: + if err == _winapi.ERROR_IO_PENDING: + waitres = _winapi.WaitForMultipleObjects( + [ov.event], False, INFINITE) + assert waitres == WAIT_OBJECT_0 + except: + ov.cancel() + raise + finally: + nread, err = ov.GetOverlappedResult(True) + if err == 0: + f = io.BytesIO() + f.write(ov.getbuffer()) + return f + elif err == _winapi.ERROR_MORE_DATA: + return self._get_more_data(ov, maxsize) + except OSError as e: + if e.winerror == _winapi.ERROR_BROKEN_PIPE: + raise EOFError + else: + raise + raise RuntimeError( + "shouldn't get here; expected KeyboardInterrupt" + ) + + def _poll(self, timeout): + if (self._got_empty_message or + _winapi.PeekNamedPipe(self._handle)[0] != 0): + return True + return bool(wait([self], timeout)) + + def _get_more_data(self, ov, maxsize): + buf = ov.getbuffer() + f = io.BytesIO() + f.write(buf) + left = _winapi.PeekNamedPipe(self._handle)[1] + assert left > 0 + if maxsize is not None and len(buf) + left > maxsize: + self._bad_message_length() + ov, err = _winapi.ReadFile(self._handle, left, overlapped=True) + rbytes, err = ov.GetOverlappedResult(True) + assert err == 0 + assert rbytes == left + f.write(ov.getbuffer()) + return f + + +class Connection(_ConnectionBase): + """ + Connection class based on an arbitrary file descriptor (Unix only), or + a socket handle (Windows). 
+ """ + + if _winapi: + def _close(self, _close=_multiprocessing.closesocket): + _close(self._handle) + _write = _multiprocessing.send + _read = _multiprocessing.recv + else: + def _close(self, _close=os.close): # noqa + _close(self._handle) + _write = os.write + _read = os.read + + def send_offset(self, buf, offset, write=_write): + return write(self._handle, buf[offset:]) + + def _send(self, buf, write=_write): + remaining = len(buf) + while True: + try: + n = write(self._handle, buf) + except OSError as exc: + if exc.errno == errno.EINTR: + continue + raise + remaining -= n + if remaining == 0: + break + buf = buf[n:] + + def setblocking(self, blocking): + setblocking(self._handle, blocking) + + def _recv(self, size, read=_read): + buf = io.BytesIO() + handle = self._handle + remaining = size + while remaining > 0: + try: + chunk = read(handle, remaining) + except OSError as exc: + if exc.errno == errno.EINTR: + continue + raise + n = len(chunk) + if n == 0: + if remaining == size: + raise EOFError + else: + raise OSError("got end of file during message") + buf.write(chunk) + remaining -= n + return buf + + def _send_bytes(self, buf): + # For wire compatibility with 3.2 and lower + n = len(buf) + self._send(struct.pack("!i", n)) + # The condition is necessary to avoid "broken pipe" errors + # when sending a 0-length buffer if the other end closed the pipe. + if n > 0: + self._send(buf) + + def _recv_bytes(self, maxsize=None): + buf = self._recv(4) + size, = struct.unpack("!i", buf.getvalue()) + if maxsize is not None and size > maxsize: + return None + return self._recv(size) + + def _poll(self, timeout): + r = wait([self], timeout) + return bool(r) + + +# +# Public functions +# + +class Listener(object): + ''' + Returns a listener object. + + This is a wrapper for a bound socket which is 'listening' for + connections, or for a Windows named pipe. + ''' + def __init__(self, address=None, family=None, backlog=1, authkey=None): + family = (family or (address and address_type(address)) or + default_family) + address = address or arbitrary_address(family) + + _validate_family(family) + if family == 'AF_PIPE': + self._listener = PipeListener(address, backlog) + else: + self._listener = SocketListener(address, family, backlog) + + if authkey is not None and not isinstance(authkey, bytes): + raise TypeError('authkey should be a byte string') + + self._authkey = authkey + + def accept(self): + ''' + Accept a connection on the bound socket or named pipe of `self`. + + Returns a `Connection` object. + ''' + if self._listener is None: + raise OSError('listener is closed') + c = self._listener.accept() + if self._authkey: + deliver_challenge(c, self._authkey) + answer_challenge(c, self._authkey) + return c + + def close(self): + ''' + Close the bound socket or named pipe of `self`. 
+ ''' + if self._listener is not None: + self._listener.close() + self._listener = None + + address = property(lambda self: self._listener._address) + last_accepted = property(lambda self: self._listener._last_accepted) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + +def Client(address, family=None, authkey=None): + ''' + Returns a connection to the address of a `Listener` + ''' + family = family or address_type(address) + _validate_family(family) + if family == 'AF_PIPE': + c = PipeClient(address) + else: + c = SocketClient(address) + + if authkey is not None and not isinstance(authkey, bytes): + raise TypeError('authkey should be a byte string') + + if authkey is not None: + answer_challenge(c, authkey) + deliver_challenge(c, authkey) + + return c + + +if sys.platform != 'win32': + + def Pipe(duplex=True, rnonblock=False, wnonblock=False): + ''' + Returns pair of connection objects at either end of a pipe + ''' + if duplex: + s1, s2 = socket.socketpair() + s1.setblocking(not rnonblock) + s2.setblocking(not wnonblock) + c1 = Connection(s1.detach()) + c2 = Connection(s2.detach()) + else: + fd1, fd2 = os.pipe() + if rnonblock: + setblocking(fd1, 0) + if wnonblock: + setblocking(fd2, 0) + c1 = Connection(fd1, writable=False) + c2 = Connection(fd2, readable=False) + + return c1, c2 + +else: + from billiard.forking import duplicate + + def Pipe(duplex=True, rnonblock=False, wnonblock=False): # noqa + ''' + Returns pair of connection objects at either end of a pipe + ''' + address = arbitrary_address('AF_PIPE') + if duplex: + openmode = _winapi.PIPE_ACCESS_DUPLEX + access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE + obsize, ibsize = BUFSIZE, BUFSIZE + else: + openmode = _winapi.PIPE_ACCESS_INBOUND + access = _winapi.GENERIC_WRITE + obsize, ibsize = 0, BUFSIZE + + h1 = _winapi.CreateNamedPipe( + address, openmode | _winapi.FILE_FLAG_OVERLAPPED | + _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE, + _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | + _winapi.PIPE_WAIT, + 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL + ) + h2 = _winapi.CreateFile( + address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, + _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL + ) + _winapi.SetNamedPipeHandleState( + h2, _winapi.PIPE_READMODE_MESSAGE, None, None + ) + + overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True) + _, err = overlapped.GetOverlappedResult(True) + assert err == 0 + + c1 = PipeConnection(duplicate(h1, inheritable=True), writable=duplex) + c2 = PipeConnection(duplicate(h2, inheritable=True), readable=duplex) + _winapi.CloseHandle(h1) + _winapi.CloseHandle(h2) + return c1, c2 + +# +# Definitions for connections based on sockets +# + + +class SocketListener(object): + ''' + Representation of a socket which is bound to an address and listening + ''' + def __init__(self, address, family, backlog=1): + self._socket = socket.socket(getattr(socket, family)) + try: + # SO_REUSEADDR has different semantics on Windows (issue #2550). 
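+            # SocketListener is the AF_INET/AF_UNIX backend behind the public
+            # Listener class above.  A hedged end-to-end sketch (hypothetical
+            # port; the two sides would normally live in different processes):
+            #
+            #   server:  >>> from billiard.py3.connection import Listener
+            #            >>> with Listener(('localhost', 6000), authkey=b'secret') as srv:
+            #            ...     conn = srv.accept()   # challenge/response auth happens here
+            #            ...     conn.recv()
+            #            'ping'
+            #
+            #   client:  >>> from billiard.py3.connection import Client
+            #            >>> c = Client(('localhost', 6000), authkey=b'secret')
+            #            >>> c.send('ping')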
+ if os.name == 'posix': + self._socket.setsockopt(socket.SOL_SOCKET, + socket.SO_REUSEADDR, 1) + self._socket.setblocking(True) + self._socket.bind(address) + self._socket.listen(backlog) + self._address = self._socket.getsockname() + except OSError: + self._socket.close() + raise + self._family = family + self._last_accepted = None + + if family == 'AF_UNIX': + self._unlink = Finalize( + self, os.unlink, args=(address, ), exitpriority=0 + ) + else: + self._unlink = None + + def accept(self): + while True: + try: + s, self._last_accepted = self._socket.accept() + except OSError as exc: + if exc.errno == errno.EINTR: + continue + raise + else: + break + s.setblocking(True) + return Connection(s.detach()) + + def close(self): + self._socket.close() + if self._unlink is not None: + self._unlink() + + +def SocketClient(address): + ''' + Return a connection object connected to the socket given by `address` + ''' + family = address_type(address) + with socket.socket(getattr(socket, family)) as s: + s.setblocking(True) + s.connect(address) + return Connection(s.detach()) + +# +# Definitions for connections based on named pipes +# + +if sys.platform == 'win32': + + class PipeListener(object): + ''' + Representation of a named pipe + ''' + def __init__(self, address, backlog=None): + self._address = address + self._handle_queue = [self._new_handle(first=True)] + + self._last_accepted = None + sub_debug('listener created with address=%r', self._address) + self.close = Finalize( + self, PipeListener._finalize_pipe_listener, + args=(self._handle_queue, self._address), exitpriority=0 + ) + + def _new_handle(self, first=False): + flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED + if first: + flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE + return _winapi.CreateNamedPipe( + self._address, flags, + _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | + _winapi.PIPE_WAIT, + _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE, + _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL + ) + + def accept(self): + self._handle_queue.append(self._new_handle()) + handle = self._handle_queue.pop(0) + try: + ov = _winapi.ConnectNamedPipe(handle, overlapped=True) + except OSError as e: + if e.winerror != _winapi.ERROR_NO_DATA: + raise + # ERROR_NO_DATA can occur if a client has already connected, + # written data and then disconnected -- see Issue 14725. 
+ else: + try: + _winapi.WaitForMultipleObjects([ov.event], False, INFINITE) + except: + ov.cancel() + _winapi.CloseHandle(handle) + raise + finally: + _, err = ov.GetOverlappedResult(True) + assert err == 0 + return PipeConnection(handle) + + @staticmethod + def _finalize_pipe_listener(queue, address): + sub_debug('closing listener with address=%r', address) + for handle in queue: + _winapi.CloseHandle(handle) + + def PipeClient(address, + errors=(_winapi.ERROR_SEM_TIMEOUT, + _winapi.ERROR_PIPE_BUSY)): + ''' + Return a connection object connected to the pipe given by `address` + ''' + t = _init_timeout() + while 1: + try: + _winapi.WaitNamedPipe(address, 1000) + h = _winapi.CreateFile( + address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE, + 0, _winapi.NULL, _winapi.OPEN_EXISTING, + _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL + ) + except OSError as e: + if e.winerror not in errors or _check_timeout(t): + raise + else: + break + else: + raise + + _winapi.SetNamedPipeHandleState( + h, _winapi.PIPE_READMODE_MESSAGE, None, None + ) + return PipeConnection(h) + +# +# Authentication stuff +# + +MESSAGE_LENGTH = 20 + +CHALLENGE = b'#CHALLENGE#' +WELCOME = b'#WELCOME#' +FAILURE = b'#FAILURE#' + + +def deliver_challenge(connection, authkey): + import hmac + assert isinstance(authkey, bytes) + message = os.urandom(MESSAGE_LENGTH) + connection.send_bytes(CHALLENGE + message) + digest = hmac.new(authkey, message).digest() + response = connection.recv_bytes(256) # reject large message + if response == digest: + connection.send_bytes(WELCOME) + else: + connection.send_bytes(FAILURE) + raise AuthenticationError('digest received was wrong') + + +def answer_challenge(connection, authkey): + import hmac + assert isinstance(authkey, bytes) + message = connection.recv_bytes(256) # reject large message + assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message + message = message[len(CHALLENGE):] + digest = hmac.new(authkey, message).digest() + connection.send_bytes(digest) + response = connection.recv_bytes(256) # reject large message + if response != WELCOME: + raise AuthenticationError('digest sent was rejected') + +# +# Support for using xmlrpclib for serialization +# + + +class ConnectionWrapper(object): + + def __init__(self, conn, dumps, loads): + self._conn = conn + self._dumps = dumps + self._loads = loads + for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'): + obj = getattr(conn, attr) + setattr(self, attr, obj) + + def send(self, obj): + s = self._dumps(obj) + self._conn.send_bytes(s) + + def recv(self): + s = self._conn.recv_bytes() + return self._loads(s) + + +def _xml_dumps(obj): + return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8') # noqa + + +def _xml_loads(s): + (obj,), method = xmlrpclib.loads(s.decode('utf-8')) # noqa + return obj + + +class XmlListener(Listener): + def accept(self): + global xmlrpclib + import xmlrpc.client as xmlrpclib # noqa + obj = Listener.accept(self) + return ConnectionWrapper(obj, _xml_dumps, _xml_loads) + + +def XmlClient(*args, **kwds): + global xmlrpclib + import xmlrpc.client as xmlrpclib # noqa + return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads) + +# +# Wait +# + +if sys.platform == 'win32': + + def _exhaustive_wait(handles, timeout): + # Return ALL handles which are currently signalled. (Only + # returning the first signalled might create starvation issues.) 
+ L = list(handles) + ready = [] + while L: + res = _winapi.WaitForMultipleObjects(L, False, timeout) + if res == WAIT_TIMEOUT: + break + elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L): + res -= WAIT_OBJECT_0 + elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L): + res -= WAIT_ABANDONED_0 + else: + raise RuntimeError('Should not get here') + ready.append(L[res]) + L = L[res+1:] + timeout = 0 + return ready + + _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED} + + def wait(object_list, timeout=None): + ''' + Wait till an object in object_list is ready/readable. + + Returns list of those objects in object_list which are ready/readable. + ''' + if timeout is None: + timeout = INFINITE + elif timeout < 0: + timeout = 0 + else: + timeout = int(timeout * 1000 + 0.5) + + object_list = list(object_list) + waithandle_to_obj = {} + ov_list = [] + ready_objects = set() + ready_handles = set() + + try: + for o in object_list: + try: + fileno = getattr(o, 'fileno') + except AttributeError: + waithandle_to_obj[o.__index__()] = o + else: + # start an overlapped read of length zero + try: + ov, err = _winapi.ReadFile(fileno(), 0, True) + except OSError as e: + err = e.winerror + if err not in _ready_errors: + raise + if err == _winapi.ERROR_IO_PENDING: + ov_list.append(ov) + waithandle_to_obj[ov.event] = o + else: + # If o.fileno() is an overlapped pipe handle and + # err == 0 then there is a zero length message + # in the pipe, but it HAS NOT been consumed. + ready_objects.add(o) + timeout = 0 + + ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout) + finally: + # request that overlapped reads stop + for ov in ov_list: + ov.cancel() + + # wait for all overlapped reads to stop + for ov in ov_list: + try: + _, err = ov.GetOverlappedResult(True) + except OSError as e: + err = e.winerror + if err not in _ready_errors: + raise + if err != _winapi.ERROR_OPERATION_ABORTED: + o = waithandle_to_obj[ov.event] + ready_objects.add(o) + if err == 0: + # If o.fileno() is an overlapped pipe handle then + # a zero length message HAS been consumed. + if hasattr(o, '_got_empty_message'): + o._got_empty_message = True + + ready_objects.update(waithandle_to_obj[h] for h in ready_handles) + return [oj for oj in object_list if oj in ready_objects] + +else: + + if hasattr(select, 'poll'): + def _poll(fds, timeout): + if timeout is not None: + timeout = int(timeout * 1000) # timeout is in milliseconds + fd_map = {} + pollster = select.poll() + for fd in fds: + pollster.register(fd, select.POLLIN) + if hasattr(fd, 'fileno'): + fd_map[fd.fileno()] = fd + else: + fd_map[fd] = fd + ls = [] + for fd, event in pollster.poll(timeout): + if event & select.POLLNVAL: + raise ValueError('invalid file descriptor %i' % fd) + ls.append(fd_map[fd]) + return ls + else: + def _poll(fds, timeout): # noqa + return select.select(fds, [], [], timeout)[0] + + def wait(object_list, timeout=None): # noqa + ''' + Wait till an object in object_list is ready/readable. + + Returns list of those objects in object_list which are ready/readable. 
+ ''' + if timeout is not None: + if timeout <= 0: + return _poll(object_list, 0) + else: + deadline = monotonic() + timeout + while True: + try: + return _poll(object_list, timeout) + except OSError as e: + if e.errno != errno.EINTR: + raise + if timeout is not None: + timeout = deadline - monotonic() diff --git a/thesisenv/lib/python3.6/site-packages/billiard/py3/reduction.py b/thesisenv/lib/python3.6/site-packages/billiard/py3/reduction.py new file mode 100644 index 0000000..e5c19bd --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/py3/reduction.py @@ -0,0 +1,249 @@ +# +# Module which deals with pickling of objects. +# +# multiprocessing/reduction.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# +from __future__ import absolute_import + +import copyreg +import functools +import io +import os +import pickle +import socket +import sys + +__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] + + +HAVE_SEND_HANDLE = (sys.platform == 'win32' or + (hasattr(socket, 'CMSG_LEN') and + hasattr(socket, 'SCM_RIGHTS') and + hasattr(socket.socket, 'sendmsg'))) + +# +# Pickler subclass +# + + +class ForkingPickler(pickle.Pickler): + '''Pickler subclass used by multiprocessing.''' + _extra_reducers = {} + _copyreg_dispatch_table = copyreg.dispatch_table + + def __init__(self, *args): + super().__init__(*args) + self.dispatch_table = self._copyreg_dispatch_table.copy() + self.dispatch_table.update(self._extra_reducers) + + @classmethod + def register(cls, type, reduce): + '''Register a reduce function for a type.''' + cls._extra_reducers[type] = reduce + + @classmethod + def dumps(cls, obj, protocol=None): + buf = io.BytesIO() + cls(buf, protocol).dump(obj) + return buf.getbuffer() + + loads = pickle.loads + +register = ForkingPickler.register + + +def dump(obj, file, protocol=None): + '''Replacement for pickle.dump() using ForkingPickler.''' + ForkingPickler(file, protocol).dump(obj) + +# +# Platform specific definitions +# + +if sys.platform == 'win32': + # Windows + __all__ += ['DupHandle', 'duplicate', 'steal_handle'] + import _winapi + + def duplicate(handle, target_process=None, inheritable=False): + '''Duplicate a handle. (target_process is a handle not a pid!)''' + if target_process is None: + target_process = _winapi.GetCurrentProcess() + return _winapi.DuplicateHandle( + _winapi.GetCurrentProcess(), handle, target_process, + 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) + + def steal_handle(source_pid, handle): + '''Steal a handle from process identified by source_pid.''' + source_process_handle = _winapi.OpenProcess( + _winapi.PROCESS_DUP_HANDLE, False, source_pid) + try: + return _winapi.DuplicateHandle( + source_process_handle, handle, + _winapi.GetCurrentProcess(), 0, False, + _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) + finally: + _winapi.CloseHandle(source_process_handle) + + def send_handle(conn, handle, destination_pid): + '''Send a handle over a local connection.''' + dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) + conn.send(dh) + + def recv_handle(conn): + '''Receive a handle over a local connection.''' + return conn.recv().detach() + + class DupHandle(object): + '''Picklable wrapper for a handle.''' + def __init__(self, handle, access, pid=None): + if pid is None: + # We just duplicate the handle in the current process and + # let the receiving process steal the handle. 
+ pid = os.getpid() + proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) + try: + self._handle = _winapi.DuplicateHandle( + _winapi.GetCurrentProcess(), + handle, proc, access, False, 0) + finally: + _winapi.CloseHandle(proc) + self._access = access + self._pid = pid + + def detach(self): + '''Get the handle. This should only be called once.''' + # retrieve handle from process which currently owns it + if self._pid == os.getpid(): + # The handle has already been duplicated for this process. + return self._handle + # We must steal the handle from the process whose pid is self._pid. + proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, + self._pid) + try: + return _winapi.DuplicateHandle( + proc, self._handle, _winapi.GetCurrentProcess(), + self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) + finally: + _winapi.CloseHandle(proc) + +else: + # Unix + __all__ += ['DupFd', 'sendfds', 'recvfds'] + import array + + # On MacOSX we should acknowledge receipt of fds -- see Issue14669 + ACKNOWLEDGE = sys.platform == 'darwin' + + def sendfds(sock, fds): + '''Send an array of fds over an AF_UNIX socket.''' + fds = array.array('i', fds) + msg = bytes([len(fds) % 256]) + sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) + if ACKNOWLEDGE and sock.recv(1) != b'A': + raise RuntimeError('did not receive acknowledgement of fd') + + def recvfds(sock, size): + '''Receive an array of fds over an AF_UNIX socket.''' + a = array.array('i') + bytes_size = a.itemsize * size + msg, ancdata, flags, addr = sock.recvmsg( + 1, socket.CMSG_LEN(bytes_size), + ) + if not msg and not ancdata: + raise EOFError + try: + if ACKNOWLEDGE: + sock.send(b'A') + if len(ancdata) != 1: + raise RuntimeError( + 'received %d items of ancdata' % len(ancdata), + ) + cmsg_level, cmsg_type, cmsg_data = ancdata[0] + if (cmsg_level == socket.SOL_SOCKET and + cmsg_type == socket.SCM_RIGHTS): + if len(cmsg_data) % a.itemsize != 0: + raise ValueError + a.frombytes(cmsg_data) + assert len(a) % 256 == msg[0] + return list(a) + except (ValueError, IndexError): + pass + raise RuntimeError('Invalid data received') + + def send_handle(conn, handle, destination_pid): # noqa + '''Send a handle over a local connection.''' + fd = conn.fileno() + with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s: + sendfds(s, [handle]) + + def recv_handle(conn): # noqa + '''Receive a handle over a local connection.''' + fd = conn.fileno() + with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s: + return recvfds(s, 1)[0] + + def DupFd(fd): + '''Return a wrapper for an fd.''' + from ..forking import Popen + return Popen.duplicate_for_child(fd) + +# +# Try making some callable types picklable +# + + +def _reduce_method(m): + if m.__self__ is None: + return getattr, (m.__class__, m.__func__.__name__) + else: + return getattr, (m.__self__, m.__func__.__name__) + + +class _C: + def f(self): + pass +register(type(_C().f), _reduce_method) + + +def _reduce_method_descriptor(m): + return getattr, (m.__objclass__, m.__name__) +register(type(list.append), _reduce_method_descriptor) +register(type(int.__add__), _reduce_method_descriptor) + + +def _reduce_partial(p): + return _rebuild_partial, (p.func, p.args, p.keywords or {}) + + +def _rebuild_partial(func, args, keywords): + return functools.partial(func, *args, **keywords) +register(functools.partial, _reduce_partial) + +# +# Make sockets picklable +# + +if sys.platform == 'win32': + + def _reduce_socket(s): + from ..resource_sharer import DupSocket + return 
_rebuild_socket, (DupSocket(s),) + + def _rebuild_socket(ds): + return ds.detach() + register(socket.socket, _reduce_socket) + +else: + + def _reduce_socket(s): # noqa + df = DupFd(s.fileno()) + return _rebuild_socket, (df, s.family, s.type, s.proto) + + def _rebuild_socket(df, family, type, proto): # noqa + fd = df.detach() + return socket.socket(family, type, proto, fileno=fd) + register(socket.socket, _reduce_socket) diff --git a/thesisenv/lib/python3.6/site-packages/billiard/queues.py b/thesisenv/lib/python3.6/site-packages/billiard/queues.py new file mode 100644 index 0000000..bd0a328 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/queues.py @@ -0,0 +1,372 @@ +# +# Module implementing queues +# +# multiprocessing/queues.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# +from __future__ import absolute_import + +import sys +import os +import threading +import collections +import weakref +import errno + +from . import Pipe +from ._ext import _billiard +from .compat import get_errno +from .five import monotonic +from .synchronize import Lock, BoundedSemaphore, Semaphore, Condition +from .util import debug, error, info, Finalize, register_after_fork +from .five import Empty, Full +from .forking import assert_spawning + +__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] + + +class Queue(object): + ''' + Queue type using a pipe, buffer and thread + ''' + def __init__(self, maxsize=0): + if maxsize <= 0: + maxsize = _billiard.SemLock.SEM_VALUE_MAX + self._maxsize = maxsize + self._reader, self._writer = Pipe(duplex=False) + self._rlock = Lock() + self._opid = os.getpid() + if sys.platform == 'win32': + self._wlock = None + else: + self._wlock = Lock() + self._sem = BoundedSemaphore(maxsize) + # For use by concurrent.futures + self._ignore_epipe = False + + self._after_fork() + + if sys.platform != 'win32': + register_after_fork(self, Queue._after_fork) + + def __getstate__(self): + assert_spawning(self) + return (self._ignore_epipe, self._maxsize, self._reader, self._writer, + self._rlock, self._wlock, self._sem, self._opid) + + def __setstate__(self, state): + (self._ignore_epipe, self._maxsize, self._reader, self._writer, + self._rlock, self._wlock, self._sem, self._opid) = state + self._after_fork() + + def _after_fork(self): + debug('Queue._after_fork()') + self._notempty = threading.Condition(threading.Lock()) + self._buffer = collections.deque() + self._thread = None + self._jointhread = None + self._joincancelled = False + self._closed = False + self._close = None + self._send = self._writer.send + self._recv = self._reader.recv + self._poll = self._reader.poll + + def put(self, obj, block=True, timeout=None): + assert not self._closed + if not self._sem.acquire(block, timeout): + raise Full + + with self._notempty: + if self._thread is None: + self._start_thread() + self._buffer.append(obj) + self._notempty.notify() + + def get(self, block=True, timeout=None): + if block and timeout is None: + with self._rlock: + res = self._recv() + self._sem.release() + return res + + else: + if block: + deadline = monotonic() + timeout + if not self._rlock.acquire(block, timeout): + raise Empty + try: + if block: + timeout = deadline - monotonic() + if timeout < 0 or not self._poll(timeout): + raise Empty + elif not self._poll(): + raise Empty + res = self._recv() + self._sem.release() + return res + finally: + self._rlock.release() + + def qsize(self): + # Raises NotImplementedError on Mac OSX because + # of broken sem_getvalue() 
+ return self._maxsize - self._sem._semlock._get_value() + + def empty(self): + return not self._poll() + + def full(self): + return self._sem._semlock._is_zero() + + def get_nowait(self): + return self.get(False) + + def put_nowait(self, obj): + return self.put(obj, False) + + def close(self): + self._closed = True + self._reader.close() + if self._close: + self._close() + + def join_thread(self): + debug('Queue.join_thread()') + assert self._closed + if self._jointhread: + self._jointhread() + + def cancel_join_thread(self): + debug('Queue.cancel_join_thread()') + self._joincancelled = True + try: + self._jointhread.cancel() + except AttributeError: + pass + + def _start_thread(self): + debug('Queue._start_thread()') + + # Start thread which transfers data from buffer to pipe + self._buffer.clear() + self._thread = threading.Thread( + target=Queue._feed, + args=(self._buffer, self._notempty, self._send, + self._wlock, self._writer.close, self._ignore_epipe), + name='QueueFeederThread' + ) + self._thread.daemon = True + + debug('doing self._thread.start()') + self._thread.start() + debug('... done self._thread.start()') + + # On process exit we will wait for data to be flushed to pipe. + # + # However, if this process created the queue then all + # processes which use the queue will be descendants of this + # process. Therefore waiting for the queue to be flushed + # is pointless once all the child processes have been joined. + created_by_this_process = (self._opid == os.getpid()) + if not self._joincancelled and not created_by_this_process: + self._jointhread = Finalize( + self._thread, Queue._finalize_join, + [weakref.ref(self._thread)], + exitpriority=-5 + ) + + # Send sentinel to the thread queue object when garbage collected + self._close = Finalize( + self, Queue._finalize_close, + [self._buffer, self._notempty], + exitpriority=10 + ) + + @staticmethod + def _finalize_join(twr): + debug('joining queue thread') + thread = twr() + if thread is not None: + thread.join() + debug('... queue thread joined') + else: + debug('... queue thread already dead') + + @staticmethod + def _finalize_close(buffer, notempty): + debug('telling queue thread to quit') + with notempty: + buffer.append(_sentinel) + notempty.notify() + + @staticmethod + def _feed(buffer, notempty, send, writelock, close, ignore_epipe): + debug('starting thread to feed data to pipe') + from .util import is_exiting + + ncond = notempty + nwait = notempty.wait + bpopleft = buffer.popleft + sentinel = _sentinel + if sys.platform != 'win32': + wlock = writelock + else: + wlock = None + + try: + while 1: + with ncond: + if not buffer: + nwait() + try: + while 1: + obj = bpopleft() + if obj is sentinel: + debug('feeder thread got sentinel -- exiting') + close() + return + + if wlock is None: + send(obj) + else: + with wlock: + send(obj) + except IndexError: + pass + except Exception as exc: + if ignore_epipe and get_errno(exc) == errno.EPIPE: + return + # Since this runs in a daemon thread the resources it uses + # may be become unusable while the process is cleaning up. + # We ignore errors which happen after the process has + # started to cleanup. 
+ try: + if is_exiting(): + info('error in queue thread: %r', exc, exc_info=True) + else: + if not error('error in queue thread: %r', exc, + exc_info=True): + import traceback + traceback.print_exc() + except Exception: + pass + +_sentinel = object() + + +class JoinableQueue(Queue): + ''' + A queue type which also supports join() and task_done() methods + + Note that if you do not call task_done() for each finished task then + eventually the counter's semaphore may overflow causing Bad Things + to happen. + ''' + + def __init__(self, maxsize=0): + Queue.__init__(self, maxsize) + self._unfinished_tasks = Semaphore(0) + self._cond = Condition() + + def __getstate__(self): + return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) + + def __setstate__(self, state): + Queue.__setstate__(self, state[:-2]) + self._cond, self._unfinished_tasks = state[-2:] + + def put(self, obj, block=True, timeout=None): + assert not self._closed + if not self._sem.acquire(block, timeout): + raise Full + + with self._notempty: + with self._cond: + if self._thread is None: + self._start_thread() + self._buffer.append(obj) + self._unfinished_tasks.release() + self._notempty.notify() + + def task_done(self): + with self._cond: + if not self._unfinished_tasks.acquire(False): + raise ValueError('task_done() called too many times') + if self._unfinished_tasks._semlock._is_zero(): + self._cond.notify_all() + + def join(self): + with self._cond: + if not self._unfinished_tasks._semlock._is_zero(): + self._cond.wait() + + +class _SimpleQueue(object): + ''' + Simplified Queue type -- really just a locked pipe + ''' + + def __init__(self, rnonblock=False, wnonblock=False): + self._reader, self._writer = Pipe( + duplex=False, rnonblock=rnonblock, wnonblock=wnonblock, + ) + self._poll = self._reader.poll + self._rlock = self._wlock = None + self._make_methods() + + def empty(self): + return not self._poll() + + def __getstate__(self): + assert_spawning(self) + return (self._reader, self._writer, self._rlock, self._wlock) + + def __setstate__(self, state): + (self._reader, self._writer, self._rlock, self._wlock) = state + self._make_methods() + + def _make_methods(self): + recv = self._reader.recv + try: + recv_payload = self._reader.recv_payload + except AttributeError: + recv_payload = self._reader.recv_bytes + rlock = self._rlock + + if rlock is not None: + def get(): + with rlock: + return recv() + self.get = get + + def get_payload(): + with rlock: + return recv_payload() + self.get_payload = get_payload + else: + self.get = recv + self.get_payload = recv_payload + + if self._wlock is None: + # writes to a message oriented win32 pipe are atomic + self.put = self._writer.send + else: + send = self._writer.send + wlock = self._wlock + + def put(obj): + with wlock: + return send(obj) + self.put = put + + +class SimpleQueue(_SimpleQueue): + + def __init__(self): + self._reader, self._writer = Pipe(duplex=False) + self._rlock = Lock() + self._wlock = Lock() if sys.platform != 'win32' else None + self._make_methods() diff --git a/thesisenv/lib/python3.6/site-packages/billiard/reduction.py b/thesisenv/lib/python3.6/site-packages/billiard/reduction.py new file mode 100644 index 0000000..c334b3e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/reduction.py @@ -0,0 +1,10 @@ +from __future__ import absolute_import + +import sys + +if sys.version_info[0] == 3: + from .py3 import reduction +else: + from .py2 import reduction # noqa + +sys.modules[__name__] = reduction diff --git 
a/thesisenv/lib/python3.6/site-packages/billiard/sharedctypes.py b/thesisenv/lib/python3.6/site-packages/billiard/sharedctypes.py new file mode 100644 index 0000000..e336c80 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/sharedctypes.py @@ -0,0 +1,248 @@ +# +# Module which supports allocation of ctypes objects from shared memory +# +# multiprocessing/sharedctypes.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# +from __future__ import absolute_import + +import ctypes +import weakref + +from . import heap, RLock +from .five import int_types +from .forking import assert_spawning +from .reduction import ForkingPickler + +__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized'] + +typecode_to_type = { + 'c': ctypes.c_char, 'u': ctypes.c_wchar, + 'b': ctypes.c_byte, 'B': ctypes.c_ubyte, + 'h': ctypes.c_short, 'H': ctypes.c_ushort, + 'i': ctypes.c_int, 'I': ctypes.c_uint, + 'l': ctypes.c_long, 'L': ctypes.c_ulong, + 'f': ctypes.c_float, 'd': ctypes.c_double +} + + +def _new_value(type_): + size = ctypes.sizeof(type_) + wrapper = heap.BufferWrapper(size) + return rebuild_ctype(type_, wrapper, None) + + +def RawValue(typecode_or_type, *args): + ''' + Returns a ctypes object allocated from shared memory + ''' + type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) + obj = _new_value(type_) + ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) + obj.__init__(*args) + return obj + + +def RawArray(typecode_or_type, size_or_initializer): + ''' + Returns a ctypes array allocated from shared memory + ''' + type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) + if isinstance(size_or_initializer, int_types): + type_ = type_ * size_or_initializer + obj = _new_value(type_) + ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) + return obj + else: + type_ = type_ * len(size_or_initializer) + result = _new_value(type_) + result.__init__(*size_or_initializer) + return result + + +def Value(typecode_or_type, *args, **kwds): + ''' + Return a synchronization wrapper for a Value + ''' + lock = kwds.pop('lock', None) + if kwds: + raise ValueError( + 'unrecognized keyword argument(s): %s' % list(kwds.keys())) + obj = RawValue(typecode_or_type, *args) + if lock is False: + return obj + if lock in (True, None): + lock = RLock() + if not hasattr(lock, 'acquire'): + raise AttributeError("'%r' has no method 'acquire'" % lock) + return synchronized(obj, lock) + + +def Array(typecode_or_type, size_or_initializer, **kwds): + ''' + Return a synchronization wrapper for a RawArray + ''' + lock = kwds.pop('lock', None) + if kwds: + raise ValueError( + 'unrecognized keyword argument(s): %s' % list(kwds.keys())) + obj = RawArray(typecode_or_type, size_or_initializer) + if lock is False: + return obj + if lock in (True, None): + lock = RLock() + if not hasattr(lock, 'acquire'): + raise AttributeError("'%r' has no method 'acquire'" % lock) + return synchronized(obj, lock) + + +def copy(obj): + new_obj = _new_value(type(obj)) + ctypes.pointer(new_obj)[0] = obj + return new_obj + + +def synchronized(obj, lock=None): + assert not isinstance(obj, SynchronizedBase), 'object already synchronized' + + if isinstance(obj, ctypes._SimpleCData): + return Synchronized(obj, lock) + elif isinstance(obj, ctypes.Array): + if obj._type_ is ctypes.c_char: + return SynchronizedString(obj, lock) + return SynchronizedArray(obj, lock) + else: + cls = type(obj) + try: + scls = class_cache[cls] + except KeyError: + names = 
[field[0] for field in cls._fields_] + d = dict((name, make_property(name)) for name in names) + classname = 'Synchronized' + cls.__name__ + scls = class_cache[cls] = type(classname, (SynchronizedBase,), d) + return scls(obj, lock) + +# +# Functions for pickling/unpickling +# + + +def reduce_ctype(obj): + assert_spawning(obj) + if isinstance(obj, ctypes.Array): + return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_) + else: + return rebuild_ctype, (type(obj), obj._wrapper, None) + + +def rebuild_ctype(type_, wrapper, length): + if length is not None: + type_ = type_ * length + ForkingPickler.register(type_, reduce_ctype) + obj = type_.from_address(wrapper.get_address()) + obj._wrapper = wrapper + return obj + +# +# Function to create properties +# + + +def make_property(name): + try: + return prop_cache[name] + except KeyError: + d = {} + exec(template % ((name, ) * 7), d) + prop_cache[name] = d[name] + return d[name] + +template = ''' +def get%s(self): + self.acquire() + try: + return self._obj.%s + finally: + self.release() +def set%s(self, value): + self.acquire() + try: + self._obj.%s = value + finally: + self.release() +%s = property(get%s, set%s) +''' + +prop_cache = {} +class_cache = weakref.WeakKeyDictionary() + +# +# Synchronized wrappers +# + + +class SynchronizedBase(object): + + def __init__(self, obj, lock=None): + self._obj = obj + self._lock = lock or RLock() + self.acquire = self._lock.acquire + self.release = self._lock.release + + def __reduce__(self): + assert_spawning(self) + return synchronized, (self._obj, self._lock) + + def get_obj(self): + return self._obj + + def get_lock(self): + return self._lock + + def __repr__(self): + return '<%s wrapper for %s>' % (type(self).__name__, self._obj) + + +class Synchronized(SynchronizedBase): + value = make_property('value') + + +class SynchronizedArray(SynchronizedBase): + + def __len__(self): + return len(self._obj) + + def __getitem__(self, i): + self.acquire() + try: + return self._obj[i] + finally: + self.release() + + def __setitem__(self, i, value): + self.acquire() + try: + self._obj[i] = value + finally: + self.release() + + def __getslice__(self, start, stop): + self.acquire() + try: + return self._obj[start:stop] + finally: + self.release() + + def __setslice__(self, start, stop, values): + self.acquire() + try: + self._obj[start:stop] = values + finally: + self.release() + + +class SynchronizedString(SynchronizedArray): + value = make_property('value') + raw = make_property('raw') diff --git a/thesisenv/lib/python3.6/site-packages/billiard/synchronize.py b/thesisenv/lib/python3.6/site-packages/billiard/synchronize.py new file mode 100644 index 0000000..45fdbcc --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/synchronize.py @@ -0,0 +1,449 @@ +# +# Module implementing synchronization primitives +# +# multiprocessing/synchronize.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. 
+# +from __future__ import absolute_import + +import itertools +import os +import signal +import sys +import threading + + +from ._ext import _billiard, ensure_SemLock +from .five import range, monotonic +from .process import current_process +from .util import Finalize, register_after_fork, debug +from .forking import assert_spawning, Popen +from .compat import bytes, closerange + +__all__ = [ + 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event', +] + +# Try to import the mp.synchronize module cleanly, if it fails +# raise ImportError for platforms lacking a working sem_open implementation. +# See issue 3770 +ensure_SemLock() + +# +# Constants +# + +RECURSIVE_MUTEX, SEMAPHORE = list(range(2)) +SEM_VALUE_MAX = _billiard.SemLock.SEM_VALUE_MAX + +try: + sem_unlink = _billiard.SemLock.sem_unlink +except AttributeError: # pragma: no cover + try: + # Py3.4+ implements sem_unlink and the semaphore must be named + from _multiprocessing import sem_unlink # noqa + except ImportError: + sem_unlink = None # noqa + +# +# Base class for semaphores and mutexes; wraps `_billiard.SemLock` +# + + +def _semname(sl): + try: + return sl.name + except AttributeError: + pass + + +class SemLock(object): + _counter = itertools.count() + + def __init__(self, kind, value, maxvalue): + from .forking import _forking_is_enabled + unlink_immediately = _forking_is_enabled or sys.platform == 'win32' + if sem_unlink: + sl = self._semlock = _billiard.SemLock( + kind, value, maxvalue, self._make_name(), unlink_immediately) + else: + sl = self._semlock = _billiard.SemLock(kind, value, maxvalue) + + debug('created semlock with handle %s', sl.handle) + self._make_methods() + + if sem_unlink: + + if sys.platform != 'win32': + def _after_fork(obj): + obj._semlock._after_fork() + register_after_fork(self, _after_fork) + + if _semname(self._semlock) is not None: + # We only get here if we are on Unix with forking + # disabled. 
When the object is garbage collected or the + # process shuts down we unlink the semaphore name + Finalize(self, sem_unlink, (self._semlock.name,), + exitpriority=0) + # In case of abnormal termination unlink semaphore name + _cleanup_semaphore_if_leaked(self._semlock.name) + + def _make_methods(self): + self.acquire = self._semlock.acquire + self.release = self._semlock.release + + def __enter__(self): + return self._semlock.__enter__() + + def __exit__(self, *args): + return self._semlock.__exit__(*args) + + def __getstate__(self): + assert_spawning(self) + sl = self._semlock + state = (Popen.duplicate_for_child(sl.handle), sl.kind, sl.maxvalue) + try: + state += (sl.name, ) + except AttributeError: + pass + return state + + def __setstate__(self, state): + self._semlock = _billiard.SemLock._rebuild(*state) + debug('recreated blocker with handle %r', state[0]) + self._make_methods() + + @staticmethod + def _make_name(): + return '/%s-%s-%s' % (current_process()._semprefix, + os.getpid(), next(SemLock._counter)) + + +class Semaphore(SemLock): + + def __init__(self, value=1): + SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX) + + def get_value(self): + return self._semlock._get_value() + + def __repr__(self): + try: + value = self._semlock._get_value() + except Exception: + value = 'unknown' + return '' % value + + +class BoundedSemaphore(Semaphore): + + def __init__(self, value=1): + SemLock.__init__(self, SEMAPHORE, value, value) + + def __repr__(self): + try: + value = self._semlock._get_value() + except Exception: + value = 'unknown' + return '' % \ + (value, self._semlock.maxvalue) + + +class Lock(SemLock): + ''' + Non-recursive lock. + ''' + + def __init__(self): + SemLock.__init__(self, SEMAPHORE, 1, 1) + + def __repr__(self): + try: + if self._semlock._is_mine(): + name = current_process().name + if threading.currentThread().name != 'MainThread': + name += '|' + threading.currentThread().name + elif self._semlock._get_value() == 1: + name = 'None' + elif self._semlock._count() > 0: + name = 'SomeOtherThread' + else: + name = 'SomeOtherProcess' + except Exception: + name = 'unknown' + return '' % name + + +class RLock(SemLock): + ''' + Recursive lock + ''' + + def __init__(self): + SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1) + + def __repr__(self): + try: + if self._semlock._is_mine(): + name = current_process().name + if threading.currentThread().name != 'MainThread': + name += '|' + threading.currentThread().name + count = self._semlock._count() + elif self._semlock._get_value() == 1: + name, count = 'None', 0 + elif self._semlock._count() > 0: + name, count = 'SomeOtherThread', 'nonzero' + else: + name, count = 'SomeOtherProcess', 'nonzero' + except Exception: + name, count = 'unknown', 'unknown' + return '' % (name, count) + + +class Condition(object): + ''' + Condition variable + ''' + + def __init__(self, lock=None): + self._lock = lock or RLock() + self._sleeping_count = Semaphore(0) + self._woken_count = Semaphore(0) + self._wait_semaphore = Semaphore(0) + self._make_methods() + + def __getstate__(self): + assert_spawning(self) + return (self._lock, self._sleeping_count, + self._woken_count, self._wait_semaphore) + + def __setstate__(self, state): + (self._lock, self._sleeping_count, + self._woken_count, self._wait_semaphore) = state + self._make_methods() + + def __enter__(self): + return self._lock.__enter__() + + def __exit__(self, *args): + return self._lock.__exit__(*args) + + def _make_methods(self): + self.acquire = self._lock.acquire + self.release = 
self._lock.release + + def __repr__(self): + try: + num_waiters = (self._sleeping_count._semlock._get_value() - + self._woken_count._semlock._get_value()) + except Exception: + num_waiters = 'unkown' + return '' % (self._lock, num_waiters) + + def wait(self, timeout=None): + assert self._lock._semlock._is_mine(), \ + 'must acquire() condition before using wait()' + + # indicate that this thread is going to sleep + self._sleeping_count.release() + + # release lock + count = self._lock._semlock._count() + for i in range(count): + self._lock.release() + + try: + # wait for notification or timeout + ret = self._wait_semaphore.acquire(True, timeout) + finally: + # indicate that this thread has woken + self._woken_count.release() + + # reacquire lock + for i in range(count): + self._lock.acquire() + return ret + + def notify(self): + assert self._lock._semlock._is_mine(), 'lock is not owned' + assert not self._wait_semaphore.acquire(False) + + # to take account of timeouts since last notify() we subtract + # woken_count from sleeping_count and rezero woken_count + while self._woken_count.acquire(False): + res = self._sleeping_count.acquire(False) + assert res + + if self._sleeping_count.acquire(False): # try grabbing a sleeper + self._wait_semaphore.release() # wake up one sleeper + self._woken_count.acquire() # wait for sleeper to wake + + # rezero _wait_semaphore in case a timeout just happened + self._wait_semaphore.acquire(False) + + def notify_all(self): + assert self._lock._semlock._is_mine(), 'lock is not owned' + assert not self._wait_semaphore.acquire(False) + + # to take account of timeouts since last notify*() we subtract + # woken_count from sleeping_count and rezero woken_count + while self._woken_count.acquire(False): + res = self._sleeping_count.acquire(False) + assert res + + sleepers = 0 + while self._sleeping_count.acquire(False): + self._wait_semaphore.release() # wake up one sleeper + sleepers += 1 + + if sleepers: + for i in range(sleepers): + self._woken_count.acquire() # wait for a sleeper to wake + + # rezero wait_semaphore in case some timeouts just happened + while self._wait_semaphore.acquire(False): + pass + + def wait_for(self, predicate, timeout=None): + result = predicate() + if result: + return result + if timeout is not None: + endtime = monotonic() + timeout + else: + endtime = None + waittime = None + while not result: + if endtime is not None: + waittime = endtime - monotonic() + if waittime <= 0: + break + self.wait(waittime) + result = predicate() + return result + + +class Event(object): + + def __init__(self): + self._cond = Condition(Lock()) + self._flag = Semaphore(0) + + def is_set(self): + self._cond.acquire() + try: + if self._flag.acquire(False): + self._flag.release() + return True + return False + finally: + self._cond.release() + + def set(self): + self._cond.acquire() + try: + self._flag.acquire(False) + self._flag.release() + self._cond.notify_all() + finally: + self._cond.release() + + def clear(self): + self._cond.acquire() + try: + self._flag.acquire(False) + finally: + self._cond.release() + + def wait(self, timeout=None): + self._cond.acquire() + try: + if self._flag.acquire(False): + self._flag.release() + else: + self._cond.wait(timeout) + + if self._flag.acquire(False): + self._flag.release() + return True + return False + finally: + self._cond.release() + + +if sys.platform != 'win32': + # + # Protection against unlinked semaphores if the program ends abnormally + # and forking has been disabled. 
+ # + + def _cleanup_semaphore_if_leaked(name): + name = name.encode('ascii') + bytes('\0', 'ascii') + if len(name) > 512: + # posix guarantees that writes to a pipe of less than PIPE_BUF + # bytes are atomic, and that PIPE_BUF >= 512 + raise ValueError('name too long') + fd = _get_unlinkfd() + bits = os.write(fd, name) + assert bits == len(name) + + def _get_unlinkfd(): + cp = current_process() + if cp._unlinkfd is None: + r, w = os.pipe() + pid = os.fork() + if pid == 0: + try: + from setproctitle import setproctitle + setproctitle("[sem_cleanup for %r]" % cp.pid) + except: + pass + + # Fork a process which will survive until all other processes + # which have a copy of the write end of the pipe have exited. + # The forked process just collects names of semaphores until + # EOF is indicated. Then it tries unlinking all the names it + # has collected. + _collect_names_then_unlink(r) + os._exit(0) + os.close(r) + cp._unlinkfd = w + return cp._unlinkfd + + def _collect_names_then_unlink(r): + # protect the process from ^C and "killall python" etc + signal.signal(signal.SIGINT, signal.SIG_IGN) + signal.signal(signal.SIGTERM, signal.SIG_IGN) + + # close all fds except r + try: + MAXFD = os.sysconf("SC_OPEN_MAX") + except: + MAXFD = 256 + closerange(0, r) + closerange(r + 1, MAXFD) + + # collect data written to pipe + data = [] + while 1: + try: + s = os.read(r, 512) + except: + # XXX IO lock might be held at fork, so don't try + # printing unexpected exception - see issue 6721 + pass + else: + if not s: + break + data.append(s) + + # attempt to unlink each collected name + for name in bytes('', 'ascii').join(data).split(bytes('\0', 'ascii')): + try: + sem_unlink(name.decode('ascii')) + except: + # XXX IO lock might be held at fork, so don't try + # printing unexpected exception - see issue 6721 + pass diff --git a/thesisenv/lib/python3.6/site-packages/billiard/tests/__init__.py b/thesisenv/lib/python3.6/site-packages/billiard/tests/__init__.py new file mode 100644 index 0000000..a87fce1 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/tests/__init__.py @@ -0,0 +1,21 @@ +from __future__ import absolute_import + +import atexit + + +def teardown(): + # Workaround for multiprocessing bug where logging + # is attempted after global already collected at shutdown. 
+ cancelled = set() + try: + import multiprocessing.util + cancelled.add(multiprocessing.util._exit_function) + except (AttributeError, ImportError): + pass + + try: + atexit._exithandlers[:] = [ + e for e in atexit._exithandlers if e[0] not in cancelled + ] + except AttributeError: + pass diff --git a/thesisenv/lib/python3.6/site-packages/billiard/tests/compat.py b/thesisenv/lib/python3.6/site-packages/billiard/tests/compat.py new file mode 100644 index 0000000..30eb853 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/tests/compat.py @@ -0,0 +1,85 @@ +from __future__ import absolute_import + +import sys + + +class WarningMessage(object): + + """Holds the result of a single showwarning() call.""" + + _WARNING_DETAILS = ('message', 'category', 'filename', 'lineno', 'file', + 'line') + + def __init__(self, message, category, filename, lineno, file=None, + line=None): + local_values = locals() + for attr in self._WARNING_DETAILS: + setattr(self, attr, local_values[attr]) + + self._category_name = category and category.__name__ or None + + def __str__(self): + return ('{message : %r, category : %r, filename : %r, lineno : %s, ' + 'line : %r}' % (self.message, self._category_name, + self.filename, self.lineno, self.line)) + + +class catch_warnings(object): + + """A context manager that copies and restores the warnings filter upon + exiting the context. + + The 'record' argument specifies whether warnings should be captured by a + custom implementation of warnings.showwarning() and be appended to a list + returned by the context manager. Otherwise None is returned by the context + manager. The objects appended to the list are arguments whose attributes + mirror the arguments to showwarning(). + + The 'module' argument is to specify an alternative module to the module + named 'warnings' and imported under that name. This argument is only + useful when testing the warnings module itself. + + """ + + def __init__(self, record=False, module=None): + """Specify whether to record warnings and if an alternative module + should be used other than sys.modules['warnings']. + + For compatibility with Python 3.0, please consider all arguments to be + keyword-only. 
+ + """ + self._record = record + self._module = module is None and sys.modules['warnings'] or module + self._entered = False + + def __repr__(self): + args = [] + if self._record: + args.append('record=True') + if self._module is not sys.modules['warnings']: + args.append('module=%r' % self._module) + name = type(self).__name__ + return '%s(%s)' % (name, ', '.join(args)) + + def __enter__(self): + if self._entered: + raise RuntimeError('Cannot enter %r twice' % self) + self._entered = True + self._filters = self._module.filters + self._module.filters = self._filters[:] + self._showwarning = self._module.showwarning + if self._record: + log = [] + + def showwarning(*args, **kwargs): + log.append(WarningMessage(*args, **kwargs)) + + self._module.showwarning = showwarning + return log + + def __exit__(self, *exc_info): + if not self._entered: + raise RuntimeError('Cannot exit %r without entering first' % self) + self._module.filters = self._filters + self._module.showwarning = self._showwarning diff --git a/thesisenv/lib/python3.6/site-packages/billiard/tests/test_common.py b/thesisenv/lib/python3.6/site-packages/billiard/tests/test_common.py new file mode 100644 index 0000000..e15a10f --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/tests/test_common.py @@ -0,0 +1,108 @@ +from __future__ import absolute_import + +import os +import signal +import sys + +from contextlib import contextmanager +from time import time + +from nose import SkipTest +from billiard.common import ( + _shutdown_cleanup, + reset_signals, + restart_state, +) + +from .utils import Case + +try: + from unittest.mock import Mock, call, patch +except ImportError: + from mock import Mock, call, patch # noqa + + +def signo(name): + return getattr(signal, name) + + +@contextmanager +def termsigs(default, full): + from billiard import common + prev_def, common.TERMSIGS_DEFAULT = common.TERMSIGS_DEFAULT, default + prev_full, common.TERMSIGS_FULL = common.TERMSIGS_FULL, full + try: + yield + finally: + common.TERMSIGS_DEFAULT, common.TERMSIGS_FULL = prev_def, prev_full + + +class test_reset_signals(Case): + + def setUp(self): + if sys.platform == 'win32': + raise SkipTest('win32: skip') + + def test_shutdown_handler(self): + with patch('sys.exit') as exit: + _shutdown_cleanup(15, Mock()) + self.assertTrue(exit.called) + self.assertEqual(os.WTERMSIG(exit.call_args[0][0]), 15) + + def test_does_not_reset_ignored_signal(self, sigs=['SIGTERM']): + with self.assert_context(sigs, [], signal.SIG_IGN) as (_, SET): + self.assertFalse(SET.called) + + def test_does_not_reset_if_current_is_None(self, sigs=['SIGTERM']): + with self.assert_context(sigs, [], None) as (_, SET): + self.assertFalse(SET.called) + + def test_resets_for_SIG_DFL(self, sigs=['SIGTERM', 'SIGINT', 'SIGUSR1']): + with self.assert_context(sigs, [], signal.SIG_DFL) as (_, SET): + SET.assert_has_calls([ + call(signo(sig), _shutdown_cleanup) for sig in sigs + ]) + + def test_resets_for_obj(self, sigs=['SIGTERM', 'SIGINT', 'SIGUSR1']): + with self.assert_context(sigs, [], object()) as (_, SET): + SET.assert_has_calls([ + call(signo(sig), _shutdown_cleanup) for sig in sigs + ]) + + def test_handles_errors(self, sigs=['SIGTERM']): + for exc in (OSError(), AttributeError(), + ValueError(), RuntimeError()): + with self.assert_context(sigs, [], signal.SIG_DFL, exc) as (_, S): + self.assertTrue(S.called) + + @contextmanager + def assert_context(self, default, full, get_returns=None, set_effect=None): + with termsigs(default, full): + with patch('signal.getsignal') as 
GET: + with patch('signal.signal') as SET: + GET.return_value = get_returns + SET.side_effect = set_effect + reset_signals() + GET.assert_has_calls([ + call(signo(sig)) for sig in default + ]) + yield GET, SET + + +class test_restart_state(Case): + + def test_raises(self): + s = restart_state(100, 1) # max 100 restarts in 1 second. + s.R = 99 + s.step() + with self.assertRaises(s.RestartFreqExceeded): + s.step() + + def test_time_passed_resets_counter(self): + s = restart_state(100, 10) + s.R, s.T = 100, time() + with self.assertRaises(s.RestartFreqExceeded): + s.step() + s.R, s.T = 100, time() + s.step(time() + 20) + self.assertEqual(s.R, 1) diff --git a/thesisenv/lib/python3.6/site-packages/billiard/tests/test_package.py b/thesisenv/lib/python3.6/site-packages/billiard/tests/test_package.py new file mode 100644 index 0000000..7934718 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/tests/test_package.py @@ -0,0 +1,12 @@ +from __future__ import absolute_import + +import billiard + +from .utils import Case + + +class test_billiard(Case): + + def test_has_version(self): + self.assertTrue(billiard.__version__) + self.assertIsInstance(billiard.__version__, str) diff --git a/thesisenv/lib/python3.6/site-packages/billiard/tests/utils.py b/thesisenv/lib/python3.6/site-packages/billiard/tests/utils.py new file mode 100644 index 0000000..1ac881f --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/tests/utils.py @@ -0,0 +1,145 @@ +from __future__ import absolute_import + +import re +import sys +import warnings + +try: + import unittest # noqa + unittest.skip + from unittest.util import safe_repr, unorderable_list_difference +except AttributeError: + import unittest2 as unittest # noqa + from unittest2.util import safe_repr, unorderable_list_difference # noqa + +from billiard.five import string_t, items, values + +from .compat import catch_warnings + +# -- adds assertWarns from recent unittest2, not in Python 2.7. + + +class _AssertRaisesBaseContext(object): + + def __init__(self, expected, test_case, callable_obj=None, + expected_regex=None): + self.expected = expected + self.failureException = test_case.failureException + self.obj_name = None + if isinstance(expected_regex, string_t): + expected_regex = re.compile(expected_regex) + self.expected_regex = expected_regex + + +class _AssertWarnsContext(_AssertRaisesBaseContext): + """A context manager used to implement TestCase.assertWarns* methods.""" + + def __enter__(self): + # The __warningregistry__'s need to be in a pristine state for tests + # to work properly. 
+ warnings.resetwarnings() + for v in values(sys.modules): + if getattr(v, '__warningregistry__', None): + v.__warningregistry__ = {} + self.warnings_manager = catch_warnings(record=True) + self.warnings = self.warnings_manager.__enter__() + warnings.simplefilter('always', self.expected) + return self + + def __exit__(self, exc_type, exc_value, tb): + self.warnings_manager.__exit__(exc_type, exc_value, tb) + if exc_type is not None: + # let unexpected exceptions pass through + return + try: + exc_name = self.expected.__name__ + except AttributeError: + exc_name = str(self.expected) + first_matching = None + for m in self.warnings: + w = m.message + if not isinstance(w, self.expected): + continue + if first_matching is None: + first_matching = w + if (self.expected_regex is not None and + not self.expected_regex.search(str(w))): + continue + # store warning for later retrieval + self.warning = w + self.filename = m.filename + self.lineno = m.lineno + return + # Now we simply try to choose a helpful failure message + if first_matching is not None: + raise self.failureException( + '%r does not match %r' % ( + self.expected_regex.pattern, str(first_matching))) + if self.obj_name: + raise self.failureException( + '%s not triggered by %s' % (exc_name, self.obj_name)) + else: + raise self.failureException('%s not triggered' % exc_name) + + +class Case(unittest.TestCase): + + def assertWarns(self, expected_warning): + return _AssertWarnsContext(expected_warning, self, None) + + def assertWarnsRegex(self, expected_warning, expected_regex): + return _AssertWarnsContext(expected_warning, self, + None, expected_regex) + + def assertDictContainsSubset(self, expected, actual, msg=None): + missing, mismatched = [], [] + + for key, value in items(expected): + if key not in actual: + missing.append(key) + elif value != actual[key]: + mismatched.append('%s, expected: %s, actual: %s' % ( + safe_repr(key), safe_repr(value), + safe_repr(actual[key]))) + + if not (missing or mismatched): + return + + standard_msg = '' + if missing: + standard_msg = 'Missing: %s' % ','.join(map(safe_repr, missing)) + + if mismatched: + if standard_msg: + standard_msg += '; ' + standard_msg += 'Mismatched values: %s' % ( + ','.join(mismatched)) + + self.fail(self._formatMessage(msg, standard_msg)) + + def assertItemsEqual(self, expected_seq, actual_seq, msg=None): + missing = unexpected = None + try: + expected = sorted(expected_seq) + actual = sorted(actual_seq) + except TypeError: + # Unsortable items (example: set(), complex(), ...) + expected = list(expected_seq) + actual = list(actual_seq) + missing, unexpected = unorderable_list_difference( + expected, actual) + else: + return self.assertSequenceEqual(expected, actual, msg=msg) + + errors = [] + if missing: + errors.append( + 'Expected, but missing:\n %s' % (safe_repr(missing), ), + ) + if unexpected: + errors.append( + 'Unexpected, but present:\n %s' % (safe_repr(unexpected), ), + ) + if errors: + standardMsg = '\n'.join(errors) + self.fail(self._formatMessage(msg, standardMsg)) diff --git a/thesisenv/lib/python3.6/site-packages/billiard/util.py b/thesisenv/lib/python3.6/site-packages/billiard/util.py new file mode 100644 index 0000000..8b5e4c3 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/billiard/util.py @@ -0,0 +1,152 @@ +# +# Module providing various facilities to other parts of the package +# +# billiard/util.py +# +# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt +# Licensed to PSF under a Contributor Agreement. 
+# +from __future__ import absolute_import + +import errno +import functools +import atexit + +from multiprocessing.util import ( # noqa + _afterfork_registry, + _afterfork_counter, + _exit_function, + _finalizer_registry, + _finalizer_counter, + Finalize, + ForkAwareLocal, + ForkAwareThreadLock, + get_temp_dir, + is_exiting, + register_after_fork, + _run_after_forkers, + _run_finalizers, +) + +from .compat import get_errno + +__all__ = [ + 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', + 'log_to_stderr', 'get_temp_dir', 'register_after_fork', + 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', + 'SUBDEBUG', 'SUBWARNING', +] + +# +# Logging +# + +NOTSET = 0 +SUBDEBUG = 5 +DEBUG = 10 +INFO = 20 +SUBWARNING = 25 +ERROR = 40 + +LOGGER_NAME = 'multiprocessing' +DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' + +_logger = None +_log_to_stderr = False + + +def sub_debug(msg, *args, **kwargs): + if _logger: + _logger.log(SUBDEBUG, msg, *args, **kwargs) + + +def debug(msg, *args, **kwargs): + if _logger: + _logger.log(DEBUG, msg, *args, **kwargs) + return True + return False + + +def info(msg, *args, **kwargs): + if _logger: + _logger.log(INFO, msg, *args, **kwargs) + return True + return False + + +def sub_warning(msg, *args, **kwargs): + if _logger: + _logger.log(SUBWARNING, msg, *args, **kwargs) + return True + return False + + +def error(msg, *args, **kwargs): + if _logger: + _logger.log(ERROR, msg, *args, **kwargs) + return True + return False + + +def get_logger(): + ''' + Returns logger used by multiprocessing + ''' + global _logger + import logging + + logging._acquireLock() + try: + if not _logger: + + _logger = logging.getLogger(LOGGER_NAME) + _logger.propagate = 0 + logging.addLevelName(SUBDEBUG, 'SUBDEBUG') + logging.addLevelName(SUBWARNING, 'SUBWARNING') + + # XXX multiprocessing should cleanup before logging + if hasattr(atexit, 'unregister'): + atexit.unregister(_exit_function) + atexit.register(_exit_function) + else: + atexit._exithandlers.remove((_exit_function, (), {})) + atexit._exithandlers.append((_exit_function, (), {})) + finally: + logging._releaseLock() + + return _logger + + +def log_to_stderr(level=None): + ''' + Turn on logging and add a handler which prints to stderr + ''' + global _log_to_stderr + import logging + + logger = get_logger() + formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) + handler = logging.StreamHandler() + handler.setFormatter(formatter) + logger.addHandler(handler) + + if level: + logger.setLevel(level) + _log_to_stderr = True + return _logger + + +def _eintr_retry(func): + ''' + Automatic retry after EINTR. + ''' + + @functools.wraps(func) + def wrapped(*args, **kwargs): + while 1: + try: + return func(*args, **kwargs) + except OSError as exc: + if get_errno(exc) != errno.EINTR: + raise + return wrapped diff --git a/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/DESCRIPTION.rst b/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000..dfc4aec --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/DESCRIPTION.rst @@ -0,0 +1,428 @@ +================================= + celery - Distributed Task Queue +================================= + +.. 
image:: http://cloud.github.com/downloads/celery/celery/celery_128.png + +:Version: 3.1.26 (Cipater) +:Web: http://celeryproject.org/ +:Download: http://pypi.python.org/pypi/celery/ +:Source: http://github.com/celery/celery/ +:Keywords: task queue, job queue, asynchronous, async, rabbitmq, amqp, redis, + python, webhooks, queue, distributed + +-- + +What is a Task Queue? +===================== + +Task queues are used as a mechanism to distribute work across threads or +machines. + +A task queue's input is a unit of work, called a task, dedicated worker +processes then constantly monitor the queue for new work to perform. + +Celery communicates via messages, usually using a broker +to mediate between clients and workers. To initiate a task a client puts a +message on the queue, the broker then delivers the message to a worker. + +A Celery system can consist of multiple workers and brokers, giving way +to high availability and horizontal scaling. + +Celery is a library written in Python, but the protocol can be implemented in +any language. So far there's RCelery_ for the Ruby programming language, and a +`PHP client`, but language interoperability can also be achieved +by using webhooks. + +.. _RCelery: http://leapfrogdevelopment.github.com/rcelery/ +.. _`PHP client`: https://github.com/gjedeer/celery-php +.. _`using webhooks`: + http://docs.celeryproject.org/en/latest/userguide/remote-tasks.html + +What do I need? +=============== + +Celery version 3.0 runs on, + +- Python (2.5, 2.6, 2.7, 3.2, 3.3) +- PyPy (1.8, 1.9) +- Jython (2.5, 2.7). + +This is the last version to support Python 2.5, +and from Celery 3.1, Python 2.6 or later is required. +The last version to support Python 2.4 was Celery series 2.2. + +*Celery* is usually used with a message broker to send and receive messages. +The RabbitMQ, Redis transports are feature complete, +but there's also experimental support for a myriad of other solutions, including +using SQLite for local development. + +*Celery* can run on a single machine, on multiple machines, or even +across datacenters. + +Get Started +=========== + +If this is the first time you're trying to use Celery, or you are +new to Celery 3.0 coming from previous versions then you should read our +getting started tutorials: + +- `First steps with Celery`_ + + Tutorial teaching you the bare minimum needed to get started with Celery. + +- `Next steps`_ + + A more complete overview, showing more features. + +.. _`First steps with Celery`: + http://docs.celeryproject.org/en/latest/getting-started/first-steps-with-celery.html + +.. _`Next steps`: + http://docs.celeryproject.org/en/latest/getting-started/next-steps.html + +Celery is... +============ + +- **Simple** + + Celery is easy to use and maintain, and does *not need configuration files*. + + It has an active, friendly community you can talk to for support, + including a `mailing-list`_ and and an IRC channel. + + Here's one of the simplest applications you can make:: + + from celery import Celery + + app = Celery('hello', broker='amqp://guest@localhost//') + + @app.task + def hello(): + return 'hello world' + +- **Highly Available** + + Workers and clients will automatically retry in the event + of connection loss or failure, and some brokers support + HA in way of *Master/Master* or *Master/Slave* replication. + +- **Fast** + + A single Celery process can process millions of tasks a minute, + with sub-millisecond round-trip latency (using RabbitMQ, + py-librabbitmq, and optimized settings). 
+ +- **Flexible** + + Almost every part of *Celery* can be extended or used on its own, + Custom pool implementations, serializers, compression schemes, logging, + schedulers, consumers, producers, autoscalers, broker transports and much more. + +It supports... +============== + + - **Message Transports** + + - RabbitMQ_, Redis_, + - MongoDB_ (experimental), Amazon SQS (experimental), + - CouchDB_ (experimental), SQLAlchemy_ (experimental), + - Django ORM (experimental), `IronMQ`_ + - and more... + + - **Concurrency** + + - Prefork, Eventlet_, gevent_, threads/single threaded + + - **Result Stores** + + - AMQP, Redis + - memcached, MongoDB + - SQLAlchemy, Django ORM + - Apache Cassandra, IronCache + + - **Serialization** + + - *pickle*, *json*, *yaml*, *msgpack*. + - *zlib*, *bzip2* compression. + - Cryptographic message signing. + +.. _`Eventlet`: http://eventlet.net/ +.. _`gevent`: http://gevent.org/ + +.. _RabbitMQ: http://rabbitmq.com +.. _Redis: http://redis.io +.. _MongoDB: http://mongodb.org +.. _Beanstalk: http://kr.github.com/beanstalkd +.. _CouchDB: http://couchdb.apache.org +.. _SQLAlchemy: http://sqlalchemy.org +.. _`IronMQ`: http://iron.io + +Framework Integration +===================== + +Celery is easy to integrate with web frameworks, some of which even have +integration packages: + + +--------------------+------------------------+ + | `Django`_ | not needed | + +--------------------+------------------------+ + | `Pyramid`_ | `pyramid_celery`_ | + +--------------------+------------------------+ + | `Pylons`_ | `celery-pylons`_ | + +--------------------+------------------------+ + | `Flask`_ | not needed | + +--------------------+------------------------+ + | `web2py`_ | `web2py-celery`_ | + +--------------------+------------------------+ + | `Tornado`_ | `tornado-celery`_ | + +--------------------+------------------------+ + +The integration packages are not strictly necessary, but they can make +development easier, and sometimes they add important hooks like closing +database connections at ``fork``. + +.. _`Django`: http://djangoproject.com/ +.. _`Pylons`: http://pylonsproject.org/ +.. _`Flask`: http://flask.pocoo.org/ +.. _`web2py`: http://web2py.com/ +.. _`Bottle`: http://bottlepy.org/ +.. _`Pyramid`: http://docs.pylonsproject.org/en/latest/docs/pyramid.html +.. _`pyramid_celery`: http://pypi.python.org/pypi/pyramid_celery/ +.. _`django-celery`: http://pypi.python.org/pypi/django-celery +.. _`celery-pylons`: http://pypi.python.org/pypi/celery-pylons +.. _`web2py-celery`: http://code.google.com/p/web2py-celery/ +.. _`Tornado`: http://www.tornadoweb.org/ +.. _`tornado-celery`: http://github.com/mher/tornado-celery/ + +.. _celery-documentation: + +Documentation +============= + +The `latest documentation`_ with user guides, tutorials and API reference +is hosted at Read The Docs. + +.. _`latest documentation`: http://docs.celeryproject.org/en/latest/ + +.. _celery-installation: + +Installation +============ + +You can install Celery either via the Python Package Index (PyPI) +or from source. + +To install using `pip`,:: + + $ pip install -U Celery + +To install using `easy_install`,:: + + $ easy_install -U Celery + +.. _bundles: + +Bundles +------- + +Celery also defines a group of bundles that can be used +to install Celery and the dependencies for a given feature. + +You can specify these in your requirements or on the ``pip`` comand-line +by using brackets. Multiple bundles can be specified by separating them by +commas. 
+:: + + $ pip install "celery[librabbitmq]" + + $ pip install "celery[librabbitmq,redis,auth,msgpack]" + +The following bundles are available: + +Serializers +~~~~~~~~~~~ + +:celery[auth]: + for using the auth serializer. + +:celery[msgpack]: + for using the msgpack serializer. + +:celery[yaml]: + for using the yaml serializer. + +Concurrency +~~~~~~~~~~~ + +:celery[eventlet]: + for using the eventlet pool. + +:celery[gevent]: + for using the gevent pool. + +:celery[threads]: + for using the thread pool. + +Transports and Backends +~~~~~~~~~~~~~~~~~~~~~~~ + +:celery[librabbitmq]: + for using the librabbitmq C library. + +:celery[redis]: + for using Redis as a message transport or as a result backend. + +:celery[mongodb]: + for using MongoDB as a message transport (*experimental*), + or as a result backend (*supported*). + +:celery[sqs]: + for using Amazon SQS as a message transport (*experimental*). + +:celery[memcache]: + for using memcached as a result backend. + +:celery[cassandra]: + for using Apache Cassandra as a result backend. + +:celery[couchdb]: + for using CouchDB as a message transport (*experimental*). + +:celery[couchbase]: + for using CouchBase as a result backend. + +:celery[beanstalk]: + for using Beanstalk as a message transport (*experimental*). + +:celery[zookeeper]: + for using Zookeeper as a message transport. + +:celery[zeromq]: + for using ZeroMQ as a message transport (*experimental*). + +:celery[sqlalchemy]: + for using SQLAlchemy as a message transport (*experimental*), + or as a result backend (*supported*). + +:celery[pyro]: + for using the Pyro4 message transport (*experimental*). + +:celery[slmq]: + for using the SoftLayer Message Queue transport (*experimental*). + +.. _celery-installing-from-source: + +Downloading and installing from source +-------------------------------------- + +Download the latest version of Celery from +http://pypi.python.org/pypi/celery/ + +You can install it by doing the following,:: + + $ tar xvfz celery-0.0.0.tar.gz + $ cd celery-0.0.0 + $ python setup.py build + # python setup.py install + +The last command must be executed as a privileged user if +you are not currently using a virtualenv. + +.. _celery-installing-from-git: + +Using the development version +----------------------------- + +With pip +~~~~~~~~ + +The Celery development version also requires the development +versions of ``kombu``, ``amqp`` and ``billiard``. + +You can install the latest snapshot of these using the following +pip commands:: + + $ pip install https://github.com/celery/celery/zipball/master#egg=celery + $ pip install https://github.com/celery/billiard/zipball/master#egg=billiard + $ pip install https://github.com/celery/py-amqp/zipball/master#egg=amqp + $ pip install https://github.com/celery/kombu/zipball/master#egg=kombu + +With git +~~~~~~~~ + +Please the Contributing section. + +.. _getting-help: + +Getting Help +============ + +.. _mailing-list: + +Mailing list +------------ + +For discussions about the usage, development, and future of celery, +please join the `celery-users`_ mailing list. + +.. _`celery-users`: http://groups.google.com/group/celery-users/ + +.. _irc-channel: + +IRC +--- + +Come chat with us on IRC. The **#celery** channel is located at the `Freenode`_ +network. + +.. _`Freenode`: http://freenode.net + +.. _bug-tracker: + +Bug tracker +=========== + +If you have any suggestions, bug reports or annoyances please report them +to our issue tracker at http://github.com/celery/celery/issues/ + +.. 
_wiki: + +Wiki +==== + +http://wiki.github.com/celery/celery/ + +.. _contributing-short: + +Contributing +============ + +Development of `celery` happens at Github: http://github.com/celery/celery + +You are highly encouraged to participate in the development +of `celery`. If you don't like Github (for some reason) you're welcome +to send regular patches. + +Be sure to also read the `Contributing to Celery`_ section in the +documentation. + +.. _`Contributing to Celery`: + http://docs.celeryproject.org/en/master/contributing.html + +.. _license: + +License +======= + +This software is licensed under the `New BSD License`. See the ``LICENSE`` +file in the top distribution directory for the full license text. + +.. # vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround + + +.. image:: https://d2weczhvl823v0.cloudfront.net/celery/celery/trend.png + :alt: Bitdeli badge + :target: https://bitdeli.com/free + + diff --git a/thesisenv/lib/python3.6/site-packages/pip-18.0.dist-info/top_level.txt b/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/INSTALLER similarity index 100% rename from thesisenv/lib/python3.6/site-packages/pip-18.0.dist-info/top_level.txt rename to thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/INSTALLER diff --git a/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/METADATA b/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/METADATA new file mode 100644 index 0000000..90226ec --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/METADATA @@ -0,0 +1,500 @@ +Metadata-Version: 2.0 +Name: celery +Version: 3.1.26.post2 +Summary: Distributed Task Queue +Home-page: http://celeryproject.org +Author: Ask Solem +Author-email: ask@celeryproject.org +License: BSD +Description-Content-Type: UNKNOWN +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: License :: OSI Approved :: BSD License +Classifier: Topic :: System :: Distributed Computing +Classifier: Topic :: Software Development :: Object Brokering +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Python :: Implementation :: Jython +Classifier: Operating System :: OS Independent +Classifier: Operating System :: POSIX +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: MacOS :: MacOS X +Requires-Dist: pytz (>dev) +Requires-Dist: billiard (<3.4,>=3.3.0.23) +Requires-Dist: kombu (<3.1,>=3.0.37) +Provides-Extra: auth +Requires-Dist: pyOpenSSL; extra == 'auth' +Provides-Extra: beanstalk +Requires-Dist: beanstalkc; extra == 'beanstalk' +Provides-Extra: cassandra +Requires-Dist: pycassa; extra == 'cassandra' +Provides-Extra: couchbase +Requires-Dist: couchbase; extra == 'couchbase' +Provides-Extra: couchdb +Requires-Dist: couchdb; extra == 'couchdb' +Provides-Extra: eventlet +Requires-Dist: eventlet; extra == 'eventlet' +Provides-Extra: gevent +Requires-Dist: gevent; extra == 'gevent' +Provides-Extra: librabbitmq +Requires-Dist: librabbitmq (>=1.6.1); extra == 'librabbitmq' +Provides-Extra: 
memcache +Requires-Dist: pylibmc; extra == 'memcache' +Provides-Extra: mongodb +Requires-Dist: pymongo (>=2.6.2); extra == 'mongodb' +Provides-Extra: msgpack +Requires-Dist: msgpack-python (>=0.3.0); extra == 'msgpack' +Provides-Extra: pyro +Requires-Dist: pyro4; extra == 'pyro' +Provides-Extra: redis +Requires-Dist: redis (>=2.8.0); extra == 'redis' +Provides-Extra: slmq +Requires-Dist: softlayer-messaging (>=1.0.3); extra == 'slmq' +Provides-Extra: sqlalchemy +Requires-Dist: sqlalchemy; extra == 'sqlalchemy' +Provides-Extra: sqs +Requires-Dist: boto (>=2.13.3); extra == 'sqs' +Provides-Extra: threads +Requires-Dist: threadpool; extra == 'threads' +Provides-Extra: yaml +Requires-Dist: PyYAML (>=3.10); extra == 'yaml' +Provides-Extra: zeromq +Requires-Dist: pyzmq (>=13.1.0); extra == 'zeromq' +Provides-Extra: zookeeper +Requires-Dist: kazoo (>=1.3.1); extra == 'zookeeper' + +================================= + celery - Distributed Task Queue +================================= + +.. image:: http://cloud.github.com/downloads/celery/celery/celery_128.png + +:Version: 3.1.26 (Cipater) +:Web: http://celeryproject.org/ +:Download: http://pypi.python.org/pypi/celery/ +:Source: http://github.com/celery/celery/ +:Keywords: task queue, job queue, asynchronous, async, rabbitmq, amqp, redis, + python, webhooks, queue, distributed + +-- + +What is a Task Queue? +===================== + +Task queues are used as a mechanism to distribute work across threads or +machines. + +A task queue's input is a unit of work, called a task, dedicated worker +processes then constantly monitor the queue for new work to perform. + +Celery communicates via messages, usually using a broker +to mediate between clients and workers. To initiate a task a client puts a +message on the queue, the broker then delivers the message to a worker. + +A Celery system can consist of multiple workers and brokers, giving way +to high availability and horizontal scaling. + +Celery is a library written in Python, but the protocol can be implemented in +any language. So far there's RCelery_ for the Ruby programming language, and a +`PHP client`, but language interoperability can also be achieved +by using webhooks. + +.. _RCelery: http://leapfrogdevelopment.github.com/rcelery/ +.. _`PHP client`: https://github.com/gjedeer/celery-php +.. _`using webhooks`: + http://docs.celeryproject.org/en/latest/userguide/remote-tasks.html + +What do I need? +=============== + +Celery version 3.0 runs on, + +- Python (2.5, 2.6, 2.7, 3.2, 3.3) +- PyPy (1.8, 1.9) +- Jython (2.5, 2.7). + +This is the last version to support Python 2.5, +and from Celery 3.1, Python 2.6 or later is required. +The last version to support Python 2.4 was Celery series 2.2. + +*Celery* is usually used with a message broker to send and receive messages. +The RabbitMQ, Redis transports are feature complete, +but there's also experimental support for a myriad of other solutions, including +using SQLite for local development. + +*Celery* can run on a single machine, on multiple machines, or even +across datacenters. + +Get Started +=========== + +If this is the first time you're trying to use Celery, or you are +new to Celery 3.0 coming from previous versions then you should read our +getting started tutorials: + +- `First steps with Celery`_ + + Tutorial teaching you the bare minimum needed to get started with Celery. + +- `Next steps`_ + + A more complete overview, showing more features. + +.. 
_`First steps with Celery`: + http://docs.celeryproject.org/en/latest/getting-started/first-steps-with-celery.html + +.. _`Next steps`: + http://docs.celeryproject.org/en/latest/getting-started/next-steps.html + +Celery is... +============ + +- **Simple** + + Celery is easy to use and maintain, and does *not need configuration files*. + + It has an active, friendly community you can talk to for support, + including a `mailing-list`_ and and an IRC channel. + + Here's one of the simplest applications you can make:: + + from celery import Celery + + app = Celery('hello', broker='amqp://guest@localhost//') + + @app.task + def hello(): + return 'hello world' + +- **Highly Available** + + Workers and clients will automatically retry in the event + of connection loss or failure, and some brokers support + HA in way of *Master/Master* or *Master/Slave* replication. + +- **Fast** + + A single Celery process can process millions of tasks a minute, + with sub-millisecond round-trip latency (using RabbitMQ, + py-librabbitmq, and optimized settings). + +- **Flexible** + + Almost every part of *Celery* can be extended or used on its own, + Custom pool implementations, serializers, compression schemes, logging, + schedulers, consumers, producers, autoscalers, broker transports and much more. + +It supports... +============== + + - **Message Transports** + + - RabbitMQ_, Redis_, + - MongoDB_ (experimental), Amazon SQS (experimental), + - CouchDB_ (experimental), SQLAlchemy_ (experimental), + - Django ORM (experimental), `IronMQ`_ + - and more... + + - **Concurrency** + + - Prefork, Eventlet_, gevent_, threads/single threaded + + - **Result Stores** + + - AMQP, Redis + - memcached, MongoDB + - SQLAlchemy, Django ORM + - Apache Cassandra, IronCache + + - **Serialization** + + - *pickle*, *json*, *yaml*, *msgpack*. + - *zlib*, *bzip2* compression. + - Cryptographic message signing. + +.. _`Eventlet`: http://eventlet.net/ +.. _`gevent`: http://gevent.org/ + +.. _RabbitMQ: http://rabbitmq.com +.. _Redis: http://redis.io +.. _MongoDB: http://mongodb.org +.. _Beanstalk: http://kr.github.com/beanstalkd +.. _CouchDB: http://couchdb.apache.org +.. _SQLAlchemy: http://sqlalchemy.org +.. _`IronMQ`: http://iron.io + +Framework Integration +===================== + +Celery is easy to integrate with web frameworks, some of which even have +integration packages: + + +--------------------+------------------------+ + | `Django`_ | not needed | + +--------------------+------------------------+ + | `Pyramid`_ | `pyramid_celery`_ | + +--------------------+------------------------+ + | `Pylons`_ | `celery-pylons`_ | + +--------------------+------------------------+ + | `Flask`_ | not needed | + +--------------------+------------------------+ + | `web2py`_ | `web2py-celery`_ | + +--------------------+------------------------+ + | `Tornado`_ | `tornado-celery`_ | + +--------------------+------------------------+ + +The integration packages are not strictly necessary, but they can make +development easier, and sometimes they add important hooks like closing +database connections at ``fork``. + +.. _`Django`: http://djangoproject.com/ +.. _`Pylons`: http://pylonsproject.org/ +.. _`Flask`: http://flask.pocoo.org/ +.. _`web2py`: http://web2py.com/ +.. _`Bottle`: http://bottlepy.org/ +.. _`Pyramid`: http://docs.pylonsproject.org/en/latest/docs/pyramid.html +.. _`pyramid_celery`: http://pypi.python.org/pypi/pyramid_celery/ +.. _`django-celery`: http://pypi.python.org/pypi/django-celery +.. 
_`celery-pylons`: http://pypi.python.org/pypi/celery-pylons +.. _`web2py-celery`: http://code.google.com/p/web2py-celery/ +.. _`Tornado`: http://www.tornadoweb.org/ +.. _`tornado-celery`: http://github.com/mher/tornado-celery/ + +.. _celery-documentation: + +Documentation +============= + +The `latest documentation`_ with user guides, tutorials and API reference +is hosted at Read The Docs. + +.. _`latest documentation`: http://docs.celeryproject.org/en/latest/ + +.. _celery-installation: + +Installation +============ + +You can install Celery either via the Python Package Index (PyPI) +or from source. + +To install using `pip`,:: + + $ pip install -U Celery + +To install using `easy_install`,:: + + $ easy_install -U Celery + +.. _bundles: + +Bundles +------- + +Celery also defines a group of bundles that can be used +to install Celery and the dependencies for a given feature. + +You can specify these in your requirements or on the ``pip`` comand-line +by using brackets. Multiple bundles can be specified by separating them by +commas. +:: + + $ pip install "celery[librabbitmq]" + + $ pip install "celery[librabbitmq,redis,auth,msgpack]" + +The following bundles are available: + +Serializers +~~~~~~~~~~~ + +:celery[auth]: + for using the auth serializer. + +:celery[msgpack]: + for using the msgpack serializer. + +:celery[yaml]: + for using the yaml serializer. + +Concurrency +~~~~~~~~~~~ + +:celery[eventlet]: + for using the eventlet pool. + +:celery[gevent]: + for using the gevent pool. + +:celery[threads]: + for using the thread pool. + +Transports and Backends +~~~~~~~~~~~~~~~~~~~~~~~ + +:celery[librabbitmq]: + for using the librabbitmq C library. + +:celery[redis]: + for using Redis as a message transport or as a result backend. + +:celery[mongodb]: + for using MongoDB as a message transport (*experimental*), + or as a result backend (*supported*). + +:celery[sqs]: + for using Amazon SQS as a message transport (*experimental*). + +:celery[memcache]: + for using memcached as a result backend. + +:celery[cassandra]: + for using Apache Cassandra as a result backend. + +:celery[couchdb]: + for using CouchDB as a message transport (*experimental*). + +:celery[couchbase]: + for using CouchBase as a result backend. + +:celery[beanstalk]: + for using Beanstalk as a message transport (*experimental*). + +:celery[zookeeper]: + for using Zookeeper as a message transport. + +:celery[zeromq]: + for using ZeroMQ as a message transport (*experimental*). + +:celery[sqlalchemy]: + for using SQLAlchemy as a message transport (*experimental*), + or as a result backend (*supported*). + +:celery[pyro]: + for using the Pyro4 message transport (*experimental*). + +:celery[slmq]: + for using the SoftLayer Message Queue transport (*experimental*). + +.. _celery-installing-from-source: + +Downloading and installing from source +-------------------------------------- + +Download the latest version of Celery from +http://pypi.python.org/pypi/celery/ + +You can install it by doing the following,:: + + $ tar xvfz celery-0.0.0.tar.gz + $ cd celery-0.0.0 + $ python setup.py build + # python setup.py install + +The last command must be executed as a privileged user if +you are not currently using a virtualenv. + +.. _celery-installing-from-git: + +Using the development version +----------------------------- + +With pip +~~~~~~~~ + +The Celery development version also requires the development +versions of ``kombu``, ``amqp`` and ``billiard``. 
+ +You can install the latest snapshot of these using the following +pip commands:: + + $ pip install https://github.com/celery/celery/zipball/master#egg=celery + $ pip install https://github.com/celery/billiard/zipball/master#egg=billiard + $ pip install https://github.com/celery/py-amqp/zipball/master#egg=amqp + $ pip install https://github.com/celery/kombu/zipball/master#egg=kombu + +With git +~~~~~~~~ + +Please the Contributing section. + +.. _getting-help: + +Getting Help +============ + +.. _mailing-list: + +Mailing list +------------ + +For discussions about the usage, development, and future of celery, +please join the `celery-users`_ mailing list. + +.. _`celery-users`: http://groups.google.com/group/celery-users/ + +.. _irc-channel: + +IRC +--- + +Come chat with us on IRC. The **#celery** channel is located at the `Freenode`_ +network. + +.. _`Freenode`: http://freenode.net + +.. _bug-tracker: + +Bug tracker +=========== + +If you have any suggestions, bug reports or annoyances please report them +to our issue tracker at http://github.com/celery/celery/issues/ + +.. _wiki: + +Wiki +==== + +http://wiki.github.com/celery/celery/ + +.. _contributing-short: + +Contributing +============ + +Development of `celery` happens at Github: http://github.com/celery/celery + +You are highly encouraged to participate in the development +of `celery`. If you don't like Github (for some reason) you're welcome +to send regular patches. + +Be sure to also read the `Contributing to Celery`_ section in the +documentation. + +.. _`Contributing to Celery`: + http://docs.celeryproject.org/en/master/contributing.html + +.. _license: + +License +======= + +This software is licensed under the `New BSD License`. See the ``LICENSE`` +file in the top distribution directory for the full license text. + +.. # vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround + + +.. 
image:: https://d2weczhvl823v0.cloudfront.net/celery/celery/trend.png + :alt: Bitdeli badge + :target: https://bitdeli.com/free + + diff --git a/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/RECORD b/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/RECORD new file mode 100644 index 0000000..3630f9f --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/RECORD @@ -0,0 +1,496 @@ +celery/__init__.py,sha256=3CpQmXwUsO3qBXRvUbCUgeb95Hs76iUyti10oevsJWw,5727 +celery/__main__.py,sha256=Zdv8wB4CbSvtgrGUVIZyFkQcHxFS7z3RRijGi4uQMN4,983 +celery/_state.py,sha256=TU-oQvKpbZtrYpU6iF7OJsekP897J_qRR0Y62Y4LSy8,3921 +celery/beat.py,sha256=kcwCMfxcS7Jvd2p7dMmK0J4NO79-OlVQbJJokJWDcHI,19009 +celery/bootsteps.py,sha256=ASlSzf2DFfYfOvtud-p-m_zo7K3f5IKspzTAzjlfNc8,12382 +celery/canvas.py,sha256=b5WZZqdHuI2bhVbroMY-K2VU_XXBY0m5hkxfy3-KNFY,22501 +celery/datastructures.py,sha256=i0evKEjw8-OPZyT77Fjr7q-nrVKPKyk3IbQ94jJzoOk,18647 +celery/exceptions.py,sha256=8SJV-PofoiyOwbSzb8CueVeKlBDTJDHkkgPQE1Suu-w,4526 +celery/five.py,sha256=a-4lbbb-qHnEm0kh7BjENwWIuft-R4WzIC2htemnIsY,11695 +celery/local.py,sha256=vXD1d-QliYsrKAJgsIj0ZNG1KEXHcoB2Ty1JEOWH_Yg,10818 +celery/platforms.py,sha256=0W1WSk8b3AQ6oNhtM5JEgN27DHoXZzzSEJ3nvjwuBs0,24774 +celery/result.py,sha256=kzlMWbWxY_rfI90RsmrV2LB8c7X2iJDaYcOh5esAhy8,28701 +celery/schedules.py,sha256=XrWguXKa8-umIbG805tvzPmUbM6B2d41SKqr86CYUy8,21787 +celery/signals.py,sha256=zuwvWGAyIQLL4F0p83wRSbjBVdnQDnEsiCC3H3_3BAc,2929 +celery/states.py,sha256=qZ880RMvRcspPb87u13wlfiP0ilQh_-Ap_I8-l0PM6w,3430 +celery/app/__init__.py,sha256=Pg6NKoOd4tbajaxrIpMcYqV_gbguCnWGbUavNUJtRVg,4380 +celery/app/amqp.py,sha256=MCAmCd20hXGAO0ilV78BUUPDNxRpE5gLD7vKIodQswk,19101 +celery/app/annotations.py,sha256=mwfXit7ZByMykH0Su7KutgEXC2DxN0MIVKwioXtiqPU,1514 +celery/app/base.py,sha256=knLzZffbOmaC3LAF-zXDzNeVzuOnzr28o_y7EZ7_mFs,24196 +celery/app/builtins.py,sha256=Dmvkm-aeMosvGdFbNGXua5otk81Qjwh5vEIGjlZjPDg,14180 +celery/app/control.py,sha256=7CrvxyZE-fIW0gSDvfUSbaZN5nGd7AWFSUlKKC5AXoI,11023 +celery/app/defaults.py,sha256=4wUvjXss3CoJvdF5B271hot1rquOn26nXHvZ2dbQHaw,11238 +celery/app/log.py,sha256=LzKSBh61d_ZK_yCW5zfR4dOmzSOs6a4cjyAFg75cZu0,9065 +celery/app/registry.py,sha256=pJLgSmSyNtn-q-GESpQQSr2TFzh8yQvPuDHD7XzwxU8,1748 +celery/app/routes.py,sha256=YzooevUbmGNrrAHGR0AwFxtuKWL2xP6g14l99dssaFI,2967 +celery/app/task.py,sha256=TclL59Gs79Sn7h5HVdHOdQtxDU3AfgQJKB7PZz5RzZY,35574 +celery/app/trace.py,sha256=lmdPyBwFKSxkfTjVPOKaTD6Rnnhs1FIHdOhcbcVmhaQ,16717 +celery/app/utils.py,sha256=oR28DoRzVVMaSFOMZ47JFGvFAP3aTtPEEH7B1LsmFAs,8367 +celery/apps/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +celery/apps/beat.py,sha256=Yu31IM0cKqM5FWt1motBjRBAYvpIsODrPRDAp1J_IYI,5189 +celery/apps/worker.py,sha256=c8mxAhCSpG5K9snPXHwpDnCCOggVMlKnH4sS2Dq8SO8,12555 +celery/backends/__init__.py,sha256=2DzVIyTm-lq5W6ElqMAK9AiJxCynp6E-bF7gPoFgfAk,2206 +celery/backends/amqp.py,sha256=p1coJ96bJR-V__RztU58zzlilclnFqK1Mkp1NYnf44E,11622 +celery/backends/base.py,sha256=pBDi5K-SO7bWRB-gXNcDky5ADO0xwJazfOkRFvsMuFc,22572 +celery/backends/cache.py,sha256=DErN0OboNLQRmL-_E6wEbBmxylZPCUJOfyydDryW5wE,4635 +celery/backends/cassandra.py,sha256=UL4qeFYa5qUC0E7oJRmci2JhDp5z7d_OPNsJnkw-B6M,7219 +celery/backends/couchbase.py,sha256=F_qczQDDBmOmViFP8M0RZ0NXPlCWxFovqqGoB4WYghk,3382 +celery/backends/mongodb.py,sha256=Ke9zj5vhmTnVAHplOhiec8B5D62_ty30PDZEF_8LFck,8688 +celery/backends/redis.py,sha256=gBz8DhREI1rKMFwQ9behNVQP8qrWJoBwU3HAi9C4tXU,10566 
+celery/backends/rpc.py,sha256=Qllbxw0T-rt6mgRxmNnZUlFgvpSgOiQOdBAU6mjlcGY,1790 +celery/backends/database/__init__.py,sha256=gCwDd2xkJ95jxwGWcIz9IIst1aryaGJ4NjufR7xPmmo,6568 +celery/backends/database/models.py,sha256=k_WXPzVk9BCGm63ne4nhQO5cDpA-WJ4afaoCtdk7dLE,2261 +celery/backends/database/session.py,sha256=tGJAnVNXOj-LW_z8Blh9u8aZ8j01M0aOLHomOrkNmvE,1840 +celery/bin/__init__.py,sha256=YULxAVdpSTcKce56Bt_l9rXSho8pqpcp082NwnkTRHs,87 +celery/bin/amqp.py,sha256=WoQCn_sg9Vbj7Bryd-sUNxNePtsl91c5_Oi3z1W0_Jk,11651 +celery/bin/base.py,sha256=saxceFnADwkNVLySAqgSaBu1W9LKfD2rfP6co_wtcBQ,21336 +celery/bin/beat.py,sha256=abMzN3d3Zu8VBKAeSiZuG1_P1loqTsu7TZWdkXt1ugM,2638 +celery/bin/celery.py,sha256=4BfRWimQltbDzUqIKmq_OSm2X4DYhwUgc0ypyDabLig,29485 +celery/bin/celeryd_detach.py,sha256=oWGoWfOgaSTi4hb-EpAKHWUPA1gXG0sjlMp6pz4PPuA,6026 +celery/bin/events.py,sha256=cSFvfzN5OHNdva0Yuzz5HNM1jhZZXtcaqdL0exVI578,4052 +celery/bin/graph.py,sha256=JycXaXGTtIyxCy96ph1Zk8FQ_3wk-9fhCDueH4hWneo,6420 +celery/bin/multi.py,sha256=owyqxdQROMFAJUMt-L5BFc8DQveSKftDHcZDlRjs_Sc,21265 +celery/bin/worker.py,sha256=P78klQzKKb872rCEXWj5MGUITA7ZN5pxiy559zjd5aU,9014 +celery/concurrency/__init__.py,sha256=t_AgXnznrRCoiAV_7ClDUzhwwu39rKIlpjr0vF7hbDg,820 +celery/concurrency/asynpool.py,sha256=MoEzDfw-po8p_kEUwjRRAATpuUoJ8hUM-BhbFHVKo0w,47804 +celery/concurrency/base.py,sha256=G_AOmxS6wsAMQ8BPcZWK2AoT4y30Sm76TtkZdGgdlrg,4203 +celery/concurrency/eventlet.py,sha256=c2R3K9Trpow43SkvnfFzkR65gbihJhIBTCaQod1LD7E,4287 +celery/concurrency/gevent.py,sha256=KjdibnAD5YfVDh1WHRKIoYKLCdGHp31WOOxXPy9UyMw,3509 +celery/concurrency/prefork.py,sha256=udTgaF-QycG4ZiDpZo_QhtjCuxcM1CUKUk4dhlXQMOU,5755 +celery/concurrency/solo.py,sha256=zi0qLzLJjO8ApdUokc-5UimsJyQFhD-_acSn8Cwgclc,696 +celery/concurrency/threads.py,sha256=N41qfLMLyWqv1cClfAm3ICATWJmC8DqfF3jReRkjgt8,1767 +celery/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +celery/contrib/abortable.py,sha256=bnOC_4lmXSrtGkvSFndEdWiJNyscynLrcpGKnr1NhcM,5094 +celery/contrib/batches.py,sha256=1GaaJw77TSjslI3B_iYleRrM-EPBDCNstmcahC8ER7U,7152 +celery/contrib/methods.py,sha256=PVmZu0PQ1rrAKzb4GzuyEPCYPUgyuFasjMpUFhEOJzU,2613 +celery/contrib/migrate.py,sha256=rMbY-7sn7sgmwkpqDleFCBUg1qR1weSi3DDmIYbss-c,11911 +celery/contrib/rdb.py,sha256=sH69j4_YBBwE9TPlqykaAlf11AN7a7r5_J3Yf5oqAeQ,4986 +celery/contrib/sphinx.py,sha256=SZd8CT67_MkcFrPUuiqDbjRF2B1QKEMO0H_ZnQcOTAQ,2019 +celery/events/__init__.py,sha256=HVSYE0r5JKMwtBbmeas_nM0LZM5wCBSPhR5lQ7GpYiI,14166 +celery/events/cursesmon.py,sha256=4sUQ8eulZwoaIRxSOwxVON86QknY2RO9Sf8dvtzyTZs,18311 +celery/events/dumper.py,sha256=LXvJDwjkO1mnny35r5xChiQinu3pDk5mJRK41PgPMnA,3285 +celery/events/snapshot.py,sha256=gkM6AkWp5Jv49gurjDDeI-NFa5FUWzwZ0A2ALiuV0EI,3268 +celery/events/state.py,sha256=5Qffr6Abj7ASxtV4-p_60PcHSVVneToW0e2Scgx6z5Q,23275 +celery/fixups/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +celery/fixups/django.py,sha256=fyPyX9OUnWhAuk-TWm--0XaeY3dNsdBOrpIvcTvvZWE,9093 +celery/loaders/__init__.py,sha256=qpF2RdoBeubV_teLZ2yKoRq8sj4aqLjRBoSCgyte47Y,1015 +celery/loaders/app.py,sha256=fFFD4SVrECpzM60DZVcnLjDtYhr1tf4ABPtkD3H1MbE,269 +celery/loaders/base.py,sha256=mclr01KtYoD0oXtYSg6erKYw8Bb27u0LJrBrD4hCFQk,9303 +celery/loaders/default.py,sha256=KH0Y2iA486qelyzoewv47SynpYJIofW2vbdFTcmGYbE,1705 +celery/security/__init__.py,sha256=KbfxRiy_FHJbYqVsedV7MlAPsThJdwxhjV5F3IjgQAU,1923 +celery/security/certificate.py,sha256=Mc925ch6wLe2sYXmBsRm7rmr2FXclq1wopEdVpRc6kc,2746 
+celery/security/key.py,sha256=rBdjSYIgTHhqrSN2YUmqOU3xn56vamLigpZTtvSQqDI,679 +celery/security/serialization.py,sha256=D9iZNvuxA-SQXolHWOyGRnNPwCNnEqFbjayhf9vQ3E8,4011 +celery/security/utils.py,sha256=mI12UmxFkxzNCdWsrv71N6r9qNHGZwy9weSl_HaCNP0,759 +celery/task/__init__.py,sha256=d0iH36VG3zOfCCv6KjvXrcO-eePFOryCLexFeUI8PLc,1743 +celery/task/base.py,sha256=zkKUF640T8cf2ltk5b_6MOWYwNOYbjqshE9ofceihn0,5583 +celery/task/http.py,sha256=qEu9tPSqSit-5L6MuOJY1EFutFim8JVGL9bked9uSFw,6849 +celery/task/sets.py,sha256=GStbowg-IQW2Xu96qV6leMiYth3gQ9mQAcKy-3hNHkI,2769 +celery/task/trace.py,sha256=unQgQJ3BjhhvPQnkBqJ-WsHj74_nvYmYSn_E1pyGcm4,323 +celery/tests/__init__.py,sha256=G98w19Jt-55CrtCUpBzoE7ooUoDbBH_4OJmune3k0D4,2618 +celery/tests/case.py,sha256=kWtIhEH582gUSNcvSAJeH37RvUuyEEy8svDzuT6ewMg,25267 +celery/tests/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +celery/tests/app/test_amqp.py,sha256=yn8vLfD3lDdPjaTE9NGsNR3aQgcKJX3KNC0Uo8h3p3g,7778 +celery/tests/app/test_annotations.py,sha256=guYRiOgF-jqzqTKdjAbC469_nKjxtdq-PxVJNbMMvow,1532 +celery/tests/app/test_app.py,sha256=vlytaWTW7VcOhqIM4RPkcTRjpp7XtTwPjpEwo7AC3ns,23272 +celery/tests/app/test_beat.py,sha256=zoHiwseH7Vw5YOcerhDMpVKog9QgIPXa7POdMTeb6JM,17387 +celery/tests/app/test_builtins.py,sha256=OxqNpLV9Z6KFrtxokJ8VHVuec-dA40nLCtMVH22pwBw,6575 +celery/tests/app/test_celery.py,sha256=Q4XTxX96kX-IUZMw95Q96AmVeeE1L00_2bfTOORodJg,535 +celery/tests/app/test_control.py,sha256=IcbpqPMVaOsL-9vaskBq8Hx2V7_09CYC5Y8kuobX538,7022 +celery/tests/app/test_defaults.py,sha256=gDxD5htqT_cFeUruz8neLLj-V1ffys5nb7u7138VlKQ,1815 +celery/tests/app/test_exceptions.py,sha256=co-o7xbNKNBAIsIW5E4x5dQntv-HK-72e1PnqsOR3Ag,849 +celery/tests/app/test_loaders.py,sha256=h5c_QJcsmoD56Uwhsi4cACK3w4cP1dnd3d-8-rOUtC0,9487 +celery/tests/app/test_log.py,sha256=nW_uMGURkHnEs-vEGg-ciTYQmXPoQXcfAvfSe7jPZpY,12745 +celery/tests/app/test_registry.py,sha256=Kw6BIkMuJMt-XRMLnVr1Dce3MLZeO4J5-abCEwGf5NM,2512 +celery/tests/app/test_routes.py,sha256=ZuoWarzltzzRx58cB8dam8i1qkZKf00A2IpkBxfCWkQ,5354 +celery/tests/app/test_schedules.py,sha256=KxjiGMXjuzGr0IZsb-Bph2AhUPeNAKNhBBajBSZ7XNo,28559 +celery/tests/app/test_utils.py,sha256=10EAWo_5AyYYddROKuSiylZixzChcqdUg06Wev2PIqw,1309 +celery/tests/backends/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +celery/tests/backends/test_amqp.py,sha256=j6HCUJv4JAn-UGjx9lwW-ZbrcGFzkXXPxtW--CeaGDY,14161 +celery/tests/backends/test_backends.py,sha256=DYm8tSsuUHSz1Gxnm1yBvNa1dHBbXn-WVrARWOoN6Vw,1535 +celery/tests/backends/test_base.py,sha256=vt2vdWekD0bEPT-L-ovdxit5RWbBn3RDdRMmjPBOglc,16071 +celery/tests/backends/test_cache.py,sha256=32keeBhHGLqlDDHzqviHwbAewuRpQPrPTnhv_6aW4fM,10280 +celery/tests/backends/test_cassandra.py,sha256=HOtGEfL82sUXBNOIr0D3z3fINmeeZH-mBDnOD83B93s,6412 +celery/tests/backends/test_couchbase.py,sha256=9Wu1cQ3UKUCV-yrrufeqpAQVvqyeMV1VjGFHXeQxAq0,4782 +celery/tests/backends/test_database.py,sha256=NlN4WTret69GSJrSJBGEU9IDFg1UdFEwpBQoJaI6FSk,6198 +celery/tests/backends/test_mongodb.py,sha256=xGbVOXl7Jfzpi1nYoVAw3RGRH-l89HYbejMS04-i8SM,14247 +celery/tests/backends/test_redis.py,sha256=uVPsHdOU14GSPZPLA6SY2JUUo79GltfUFVy1Olfc7fM,8655 +celery/tests/backends/test_rpc.py,sha256=iQBb0efYHvSSppUc6IaK2L-Jbr_Utk2iUpOoT8AzfYI,2317 +celery/tests/bin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +celery/tests/bin/test_amqp.py,sha256=paYj2ZdtfeodT9zmrwL8Pbmk2aCUhkGnAgbnEmrQZ6k,4721 +celery/tests/bin/test_base.py,sha256=8EXItbrOQT1L-bKP0fxjiwkkEjEMiApqBJrLw0xqbIc,11301 
+celery/tests/bin/test_beat.py,sha256=QvTecx2yqc-e0KrQjqAXB3aISc999IHc__I10s6yOJc,5464 +celery/tests/bin/test_celery.py,sha256=CrMMqM3duzFMCt1xPHDf7GNpp7-9otCJFiN2R4HVI3U,18700 +celery/tests/bin/test_celeryd_detach.py,sha256=TchgSUR8vDB8OqRF6VpnYMKktpGrgZIQLXJhShWLcpE,4000 +celery/tests/bin/test_celeryevdump.py,sha256=1ImmCOndSESTVvARls0Wjngvd86NFp4WCF9r32OI8HI,2231 +celery/tests/bin/test_events.py,sha256=HYPiQJcFumiSHwtMnXO8dcURW2eNknyTCoSwpOWhm1w,2435 +celery/tests/bin/test_multi.py,sha256=MVGxbabKXDPgAmdME3K8zSmZ9bTjKkMviBCP0RHoum4,16477 +celery/tests/bin/test_worker.py,sha256=9LJJrDjzRQzM7LAPbEF0sK5mxLj8Xpjso9chODgJiQs,23503 +celery/tests/bin/proj/__init__.py,sha256=Q9qt46aWx0dx_SFfyigaH4goU1_ea01l7T4dhpDEeSs,104 +celery/tests/bin/proj/app.py,sha256=ZpUV5lnfIiYBH1jMsap--ZQbX9YWk-zEO_2RTwI7lYE,102 +celery/tests/compat_modules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +celery/tests/compat_modules/test_compat.py,sha256=q840-7jXVOMxpB5qS-5Pv99pZXPEeDMx15__SJVEHMI,2376 +celery/tests/compat_modules/test_compat_utils.py,sha256=0GQ1cxCiK8k4qOzvanBSSYLawO1vFEdmJaDAPz0AfCQ,1457 +celery/tests/compat_modules/test_decorators.py,sha256=KS7ghG-RYiibnK4JcGZX_r-d9RsRAhKitLXA72WzsGA,1066 +celery/tests/compat_modules/test_http.py,sha256=q1IaC7oUY9CEPUQga8t6RoMGbQQxBCGC3gODskqW3LU,5008 +celery/tests/compat_modules/test_messaging.py,sha256=XsQIR6vdtnfCpcPchGJUND1d6t6Mi7Cqjo0yJ3TY0zQ,357 +celery/tests/compat_modules/test_sets.py,sha256=h5yzbwuLtVqQHeY7INq9nmERApnhwWs1EbrfP8Lbkh8,7630 +celery/tests/concurrency/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +celery/tests/concurrency/test_concurrency.py,sha256=saYW1_SeBdRJTUwx_9wtNpZXslDJQCQsfcmoNS2BIZ4,3163 +celery/tests/concurrency/test_eventlet.py,sha256=hWsEQlZbSqQoPfjBM8xDq7ZeRJ-UJePxj8xlrmJ96dQ,3303 +celery/tests/concurrency/test_gevent.py,sha256=n8WCZO9JLTPOjVajRKPlaHI_qPRC6tr3DgVPO_3lZ20,4309 +celery/tests/concurrency/test_pool.py,sha256=nKgYR3rHtsuqcxKSGqC_tMF2glqIiecDZMEGG1bYCK4,2326 +celery/tests/concurrency/test_prefork.py,sha256=lSfo-sVt_f6rPjQNNV7hQ1wNGghM5SWwztO_ubcbx58,8490 +celery/tests/concurrency/test_solo.py,sha256=sljYxvp-oY4wSHftFOwXR5jSDCBZvmu_AswysJfeDSg,531 +celery/tests/concurrency/test_threads.py,sha256=8PkYbDDxdiPe3vWvKs3kQoEhPEV4MEVMoapeUQcooAY,1861 +celery/tests/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +celery/tests/contrib/test_abortable.py,sha256=61ys7MX9IgVZ5KsYMZKLoaLxovRcMQL8kz7DK8GHEWg,1513 +celery/tests/contrib/test_methods.py,sha256=_xxqh_QobP8kP_Y0YS-GvYGIFLp6L-aeL8qeSles4DQ,840 +celery/tests/contrib/test_migrate.py,sha256=tHMo0uQ-htzmIv9WBC0-KdLZeLk-75CKqLX2uFLn46Y,11182 +celery/tests/contrib/test_rdb.py,sha256=ubWjYB-0hzPXqVtAyeLw99a4DpdAGBmade9Fh70tKbU,3093 +celery/tests/events/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +celery/tests/events/test_cursesmon.py,sha256=iK8iwm8MtIVUiiWKbzW4WrWdCVX3hBPb4yAwYIrWetM,2653 +celery/tests/events/test_events.py,sha256=hKE-0cIMG8H1_91H9i2fB430J7ii-H2WzTS3q51cdis,8527 +celery/tests/events/test_snapshot.py,sha256=WeTY_uUeKNVSTxVtvAO2xYmftYlwA8uivd2KsmeNWjk,3734 +celery/tests/events/test_state.py,sha256=6exI3OaJ3eMCSYt1_gCgBTzYZ_6lVfm2SjSyVK09V90,18838 +celery/tests/fixups/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +celery/tests/fixups/test_django.py,sha256=LMJEHFjXpS2AY9J9lM03vxh9QOay15HUWj1s7hEAGps,11892 +celery/tests/functional/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+celery/tests/functional/case.py,sha256=hJGE0oy6ABIuBc3osBPQid7KwaKefe8WvsuIrKQkkwg,5599 +celery/tests/functional/tasks.py,sha256=nfDlTt_urjMpu-6ug4KWD5BieWFdxQbkhEVPepfEE_8,341 +celery/tests/security/__init__.py,sha256=ivc_G0iCuqZ1bbKyEABXdcH6X_nXZLIq5MlYgCP6z-A,3623 +celery/tests/security/case.py,sha256=YQ_4RTsCEkPxamivvitHvqsgbkStx-13ma00dwG2MMQ,403 +celery/tests/security/test_certificate.py,sha256=IADR90BtZUo9wOTX_K6QIHFB3qMqALatGnWaB90cfBA,2633 +celery/tests/security/test_key.py,sha256=xMmVbUbB4TzVUq8XZRS2jjuv6hu0AwUXrum-PLTIDqM,845 +celery/tests/security/test_security.py,sha256=QR7KlWiukB0sFtjLVhJDFzQBBWwbMshbzG6jef_RPFI,3845 +celery/tests/security/test_serialization.py,sha256=o0i-FO22l8kbJNSf8ajSg9cIE_oXH3QpECWfwA2bv1k,2252 +celery/tests/slow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +celery/tests/tasks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +celery/tests/tasks/test_canvas.py,sha256=Zu0iO8JxUajNzcOcpgo_kYoTplHS5eI1CgulBSypkaU,10664 +celery/tests/tasks/test_chord.py,sha256=jHLjW-4QwCEkag7uwhnvTcFB3-gdbFpIm0dal_QQO8w,7007 +celery/tests/tasks/test_context.py,sha256=o89z1fvYROuiIYM_HW3DpFaWz6y8-dIze2TSc2UmXoA,2546 +celery/tests/tasks/test_result.py,sha256=aMOqbAaf6SgtrNBwIWbjDC7pDFcNy0nWzabQIiuHHuo,24135 +celery/tests/tasks/test_states.py,sha256=z2OV113N4EPS33AZu3chN3XGEbPIrKmYa699gdIFHI4,1317 +celery/tests/tasks/test_tasks.py,sha256=CIF1MhneGOIUvUelwcBD7j6hUSDevgBVEQd7i6ral5I,15806 +celery/tests/tasks/test_trace.py,sha256=T8ZyKBfccSNTzmXc8_FyJURBO-kTaikijPLOBLDBVXU,6770 +celery/tests/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +celery/tests/utils/test_datastructures.py,sha256=b1nsrozGQTPMVgS5OaT6RrBQNOQgV5DDksadFIi97qc,10629 +celery/tests/utils/test_dispatcher.py,sha256=sZMai1M6cufvaXUcDnD4lLVMUdWM6txOAYsvNq-EDqg,3873 +celery/tests/utils/test_encoding.py,sha256=Lk5BF_Sr8QfcBndp5ldvzmVUwNBA1p_LjKo3t1rGk8c,526 +celery/tests/utils/test_functional.py,sha256=riIDlFNhFfmGa8VH04EEhE2HCUtvlF-ID6epYjLeb5A,5074 +celery/tests/utils/test_imports.py,sha256=rZ-Cjt1SYEvVO7SToxTk5bVmS0yW9Qnt754qX2PGeP0,1284 +celery/tests/utils/test_local.py,sha256=zmP1lZbgmMgFauUeVtEr5maQXWguS6LUxDExXTzSrIk,9755 +celery/tests/utils/test_mail.py,sha256=GJLoH4UAjxNWdFP-vBagjzGQnwuUvtRr45gSF8WXmLY,1594 +celery/tests/utils/test_pickle.py,sha256=j1RuTZJLLw79cquX0rpVy-6BHULvF8Jf0iwF7jOPVVk,1572 +celery/tests/utils/test_platforms.py,sha256=PYJPbu5xl22Ikit7h6Bik82xzDGxFQ8BhzmRWIyHcXU,23906 +celery/tests/utils/test_saferef.py,sha256=sGvHI0iGfpN2p83SaDABRTrHuHNfg2fpFUlbWHpRNis,2050 +celery/tests/utils/test_serialization.py,sha256=wiQPcEhVdNPpKqIIG0akHJ1HADDKGGTm45r5f36LzAQ,1129 +celery/tests/utils/test_sysinfo.py,sha256=wJpb59DawWxJ1ol00RDV1ML_kS-3475amczYgtbnj6Q,909 +celery/tests/utils/test_term.py,sha256=9UdtJKag7NOAaryRoTN_xzoE0SYcDGOdf4S9Dfh62Ww,2633 +celery/tests/utils/test_text.py,sha256=0vomEwnuw0hbA-081xFZso1X8uQ0bx1sDx5lxBDWD3w,2179 +celery/tests/utils/test_threads.py,sha256=RFIaXkJ0TdyXzoGAnHg9t7QhEIEMe44cSFrxYp-gDgA,2666 +celery/tests/utils/test_timer2.py,sha256=z3mxGq3WcpTXe2mwlfHGMj_HkVsFu9YyDkrhA2Wo_s0,5099 +celery/tests/utils/test_timeutils.py,sha256=u_8BEOt04m21JPCjm71nnbvFpEsIxGRQt6aDV_BPieM,8405 +celery/tests/utils/test_utils.py,sha256=GKEN-d4kK0NxSdAn-nnN_WQlJGOqx4RR4tarRTX26ss,2812 +celery/tests/worker/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +celery/tests/worker/test_autoreload.py,sha256=5Vs727vOhs6WCMwYe4RMQcjaTIVO-hPFxXdD5Ka2a0M,9877 
+celery/tests/worker/test_autoscale.py,sha256=LNAObLqwy79pF0xLIWR36dk6VtL5Rq0uOT7oLQW3ZD4,5900 +celery/tests/worker/test_bootsteps.py,sha256=8QEzvNayK7oCjCAaX005-fvn11PK4_VXtr7EkSaXaok,9132 +celery/tests/worker/test_components.py,sha256=_0k_lYjst-zh5bwy-GlPMFgaGUsiZdeyu4ycUEnM8b0,920 +celery/tests/worker/test_consumer.py,sha256=8B3WloJo3sY2pzCkMUPZHg7R5u8rNihaS1VGeleLajo,16490 +celery/tests/worker/test_control.py,sha256=iY6BEvjy4jDk5sy7RTgpatz7ZzaJK-JrvF-EgiNrk1Q,21324 +celery/tests/worker/test_heartbeat.py,sha256=AoLPVZdyBZO6-F3JPdMRPC2O1hAYszFIFDPR3-4L3C8,1678 +celery/tests/worker/test_hub.py,sha256=iOPrKj-LN0Ts_OAhaljpjBq5XhYU_KtY51dZq8zGiIM,9735 +celery/tests/worker/test_loops.py,sha256=DMC4xqBQhuRICNwUhPXngM6avUDootuY7LxtKhZ5SAE,14533 +celery/tests/worker/test_request.py,sha256=KEEoQoGkUV81W9BmkOahMIepuJpTGvnsTreFAxrI1-g,31467 +celery/tests/worker/test_revoke.py,sha256=v9ZEOEspe565G8eRAup17o5cXA2BDRiiwxpPgGRDNRo,336 +celery/tests/worker/test_state.py,sha256=x7vtdk05Z44KQiwJOJTPHvebKMDCNy4ErY6_j4suFNs,4595 +celery/tests/worker/test_strategy.py,sha256=NIMURR2DosEY21Jx0KBk3Rz4fpYcuLZ4doFpsUqzFjc,4624 +celery/tests/worker/test_worker.py,sha256=9IcP8_WT4ujLSPL-v5MGp4fwUpUAjLHISJNBM77tzcs,38397 +celery/utils/__init__.py,sha256=kkA4rLGtWwH9m8-kjDxh6pfgf0SGYO-yBag-vrsUEBs,12713 +celery/utils/compat.py,sha256=oV2FXmhw_Yq7ub_RWl-XRZBJmd6xMpdrpaeIXvPgFt8,34 +celery/utils/debug.py,sha256=GihMTBeKlKYs-0lr3f2TXq1lgBh4CC-VhZsO-zkCQ98,3751 +celery/utils/encoding.py,sha256=yh10Ml0TsdibU3EGbd2lvDTpNvxtD6yN_2o8LI7sEno,361 +celery/utils/functional.py,sha256=C9CsNmx_VyB3U2Zwc83eIkDAD50dJN6ayWDukUK9b60,8814 +celery/utils/imports.py,sha256=oSzhVyyt9DZs2KtLqrkOOMwsOePPC_A6h7LeaZsoxJw,2914 +celery/utils/iso8601.py,sha256=zA4OeMDxKGzNEV6aFOWAZzpix7i6VUJms1vabPyx0B8,2738 +celery/utils/log.py,sha256=UYSFLqkxKNXpBbhfY9kZGn4jOVyKrfld-SmDiY2nYOQ,9292 +celery/utils/mail.py,sha256=rnhrwfJXl5cP_KOtcPWypAhBihnm0Fa5U7Xep36QqZ0,4944 +celery/utils/objects.py,sha256=grHN_y3LnktQPQI8eTw9vBwR6KcPBT-BRUL2VJHr6w4,2762 +celery/utils/serialization.py,sha256=Wgo-K628_x1dJTeClG5TWJbKxxfiQrAkEUvE41nRX5s,4869 +celery/utils/sysinfo.py,sha256=zlQAlqJgIt0SGG8AnIYvQRiy0yK9D2cC_RtmJpPz0Ac,993 +celery/utils/term.py,sha256=zBgNYbw86wuLvmEHG18-wXycmgqNiPxQ8bNVWt5bpk4,3927 +celery/utils/text.py,sha256=r5j7bXZr6gAnzr_TGfRT5Lp2OgHi6mPOu8lTmIq8_ss,2020 +celery/utils/threads.py,sha256=Ef1d7pj1loMilftUPqtbGhcQe1NoHPFlbtMHsqd-u04,9636 +celery/utils/timer2.py,sha256=zj3p0jH7lxpmWUAAaCS1EH6ubWp1m3vmyRWd8fCV6CA,4236 +celery/utils/timeutils.py,sha256=VcSgnUv9SmBq7Pcf6YdumLDcSlSpQt1U-Higr-NG0IA,11193 +celery/utils/dispatch/__init__.py,sha256=o1QuagJss6zaoNcLLotHHs94Eii7e4VNqu8j2Zj58y0,113 +celery/utils/dispatch/saferef.py,sha256=E2OXv4ViRbDh8zkQLerQwOeMfNkPmpu1HmxlmSScJbs,10894 +celery/utils/dispatch/signal.py,sha256=1K6bmvN7QdhSyfLwxflTmsxIQrpSirix5bxjjLTE4D0,8343 +celery/worker/__init__.py,sha256=vFwzEd6pUJTu1joU9OL_FIPY6DG4KNcrXJyuJRuGnPw,13641 +celery/worker/autoreload.py,sha256=svnUXyQqm2QlKBiUJWGJS51DcmedEtQgzKp7HYEuf0E,8868 +celery/worker/autoscale.py,sha256=e6iN0hq6FlOvsA9MmIulWySZxiRQNVAc9_ILZtLWetc,4864 +celery/worker/components.py,sha256=I3RmLjA7f0bQ8mFrSpLNH9s-j9Gg0sBptZx7wG9X3ro,7580 +celery/worker/consumer.py,sha256=AGmtw7dHAPHYmx1DLy3R2GbLJa30KXHoaMrLKmwLrzk,29347 +celery/worker/control.py,sha256=6IfSRbMSB7R9yXUGlR4sdkoJderRvKh-uty8tqclejM,11410 +celery/worker/heartbeat.py,sha256=NAM8Bq7ywHabXclltgrnCQb6FbnBh3sLPEveycNP3sk,1737 +celery/worker/job.py,sha256=bmkKSVd5BuHcGdQ_gn3MJeeLkx_-iBvKTRTImLrtBYI,21054 
+celery/worker/loops.py,sha256=uAQDdHg-hAo7RvgqVMhgvOkDqmAkJDVGj4FgQNzObAc,3420 +celery/worker/pidbox.py,sha256=wfephMpB1gJu0f0WPUFrsTSPQjSGvwp3FCJNTcPtHzo,3447 +celery/worker/request.py,sha256=twDXCdrvS7T0KAlknT_XubTINPeoXyuqVPNnSsEqQgM,18826 +celery/worker/state.py,sha256=i2DbvX5v483Lyf_VUUKbzp7oMCHSvq5sMbb3A3G1zx4,6791 +celery/worker/strategy.py,sha256=TlObf6FkTTEeGF0FTbkkmh5Axl-IXiNxHZG0ec6C_DQ,3087 +celery-3.1.26.post2.dist-info/DESCRIPTION.rst,sha256=ZjBRr8syYhEecvIb6tx8S15p0Lgv7cWd3DpkJxw8gFs,11599 +celery-3.1.26.post2.dist-info/METADATA,sha256=0QZInn5VoWtzmvqD8gOQYch2rELrfOeA4v5baTqLnT8,14355 +celery-3.1.26.post2.dist-info/RECORD,, +celery-3.1.26.post2.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110 +celery-3.1.26.post2.dist-info/entry_points.txt,sha256=Cx6fgw30zDMkid9S17TYinhJwJHG5MjMfwZNGqDsTb4,178 +celery-3.1.26.post2.dist-info/metadata.json,sha256=wI1gtk7Xfkv36kqvqr7aIy34p86b3R_XDTsh-eJd3IA,3169 +celery-3.1.26.post2.dist-info/top_level.txt,sha256=sQQ-a5HNsZIi2A8DiKQnB1HODFMfmrzIAZIE8t_XiOA,7 +../../../bin/celery,sha256=reolwO892Sx1ruHQnX6Gb7v-Su0tWTjipUH7c7xDZQc,246 +../../../bin/celerybeat,sha256=goFpTFIXyk1hqyNFRA1KfbG61c9lJLp1wSo2pRe3mnU,262 +../../../bin/celeryd,sha256=tl_DPKb1fRWEd_McTOvrwTdSgYw3U4PtFFRb9UnrFFs,266 +../../../bin/celeryd-multi,sha256=Ktk0eE1NxFhtnA9MWP_AberKfyVK307SoM2SCVhQHto,264 +celery-3.1.26.post2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +celery/app/__pycache__/amqp.cpython-36.pyc,, +celery/app/__pycache__/annotations.cpython-36.pyc,, +celery/app/__pycache__/task.cpython-36.pyc,, +celery/app/__pycache__/control.cpython-36.pyc,, +celery/app/__pycache__/trace.cpython-36.pyc,, +celery/app/__pycache__/builtins.cpython-36.pyc,, +celery/app/__pycache__/base.cpython-36.pyc,, +celery/app/__pycache__/log.cpython-36.pyc,, +celery/app/__pycache__/defaults.cpython-36.pyc,, +celery/app/__pycache__/registry.cpython-36.pyc,, +celery/app/__pycache__/utils.cpython-36.pyc,, +celery/app/__pycache__/routes.cpython-36.pyc,, +celery/app/__pycache__/__init__.cpython-36.pyc,, +celery/bin/__pycache__/amqp.cpython-36.pyc,, +celery/bin/__pycache__/beat.cpython-36.pyc,, +celery/bin/__pycache__/celeryd_detach.cpython-36.pyc,, +celery/bin/__pycache__/multi.cpython-36.pyc,, +celery/bin/__pycache__/base.cpython-36.pyc,, +celery/bin/__pycache__/celery.cpython-36.pyc,, +celery/bin/__pycache__/__init__.cpython-36.pyc,, +celery/bin/__pycache__/worker.cpython-36.pyc,, +celery/bin/__pycache__/graph.cpython-36.pyc,, +celery/bin/__pycache__/events.cpython-36.pyc,, +celery/security/__pycache__/key.cpython-36.pyc,, +celery/security/__pycache__/certificate.cpython-36.pyc,, +celery/security/__pycache__/utils.cpython-36.pyc,, +celery/security/__pycache__/serialization.cpython-36.pyc,, +celery/security/__pycache__/__init__.cpython-36.pyc,, +celery/backends/database/__pycache__/models.cpython-36.pyc,, +celery/backends/database/__pycache__/session.cpython-36.pyc,, +celery/backends/database/__pycache__/__init__.cpython-36.pyc,, +celery/backends/__pycache__/amqp.cpython-36.pyc,, +celery/backends/__pycache__/cassandra.cpython-36.pyc,, +celery/backends/__pycache__/rpc.cpython-36.pyc,, +celery/backends/__pycache__/base.cpython-36.pyc,, +celery/backends/__pycache__/couchbase.cpython-36.pyc,, +celery/backends/__pycache__/cache.cpython-36.pyc,, +celery/backends/__pycache__/mongodb.cpython-36.pyc,, +celery/backends/__pycache__/__init__.cpython-36.pyc,, +celery/backends/__pycache__/redis.cpython-36.pyc,, 
+celery/tests/tasks/__pycache__/test_chord.cpython-36.pyc,, +celery/tests/tasks/__pycache__/test_states.cpython-36.pyc,, +celery/tests/tasks/__pycache__/test_tasks.cpython-36.pyc,, +celery/tests/tasks/__pycache__/test_canvas.cpython-36.pyc,, +celery/tests/tasks/__pycache__/test_context.cpython-36.pyc,, +celery/tests/tasks/__pycache__/test_result.cpython-36.pyc,, +celery/tests/tasks/__pycache__/__init__.cpython-36.pyc,, +celery/tests/tasks/__pycache__/test_trace.cpython-36.pyc,, +celery/tests/app/__pycache__/test_defaults.cpython-36.pyc,, +celery/tests/app/__pycache__/test_registry.cpython-36.pyc,, +celery/tests/app/__pycache__/test_loaders.cpython-36.pyc,, +celery/tests/app/__pycache__/test_builtins.cpython-36.pyc,, +celery/tests/app/__pycache__/test_log.cpython-36.pyc,, +celery/tests/app/__pycache__/test_utils.cpython-36.pyc,, +celery/tests/app/__pycache__/test_control.cpython-36.pyc,, +celery/tests/app/__pycache__/test_celery.cpython-36.pyc,, +celery/tests/app/__pycache__/test_routes.cpython-36.pyc,, +celery/tests/app/__pycache__/test_annotations.cpython-36.pyc,, +celery/tests/app/__pycache__/test_exceptions.cpython-36.pyc,, +celery/tests/app/__pycache__/test_beat.cpython-36.pyc,, +celery/tests/app/__pycache__/test_amqp.cpython-36.pyc,, +celery/tests/app/__pycache__/test_app.cpython-36.pyc,, +celery/tests/app/__pycache__/test_schedules.cpython-36.pyc,, +celery/tests/app/__pycache__/__init__.cpython-36.pyc,, +celery/tests/bin/proj/__pycache__/app.cpython-36.pyc,, +celery/tests/bin/proj/__pycache__/__init__.cpython-36.pyc,, +celery/tests/bin/__pycache__/test_worker.cpython-36.pyc,, +celery/tests/bin/__pycache__/test_events.cpython-36.pyc,, +celery/tests/bin/__pycache__/test_base.cpython-36.pyc,, +celery/tests/bin/__pycache__/test_celery.cpython-36.pyc,, +celery/tests/bin/__pycache__/test_celeryevdump.cpython-36.pyc,, +celery/tests/bin/__pycache__/test_multi.cpython-36.pyc,, +celery/tests/bin/__pycache__/test_beat.cpython-36.pyc,, +celery/tests/bin/__pycache__/test_amqp.cpython-36.pyc,, +celery/tests/bin/__pycache__/test_celeryd_detach.cpython-36.pyc,, +celery/tests/bin/__pycache__/__init__.cpython-36.pyc,, +celery/tests/security/__pycache__/test_security.cpython-36.pyc,, +celery/tests/security/__pycache__/test_certificate.cpython-36.pyc,, +celery/tests/security/__pycache__/test_serialization.cpython-36.pyc,, +celery/tests/security/__pycache__/test_key.cpython-36.pyc,, +celery/tests/security/__pycache__/case.cpython-36.pyc,, +celery/tests/security/__pycache__/__init__.cpython-36.pyc,, +celery/tests/slow/__pycache__/__init__.cpython-36.pyc,, +celery/tests/backends/__pycache__/test_cache.cpython-36.pyc,, +celery/tests/backends/__pycache__/test_backends.cpython-36.pyc,, +celery/tests/backends/__pycache__/test_base.cpython-36.pyc,, +celery/tests/backends/__pycache__/test_redis.cpython-36.pyc,, +celery/tests/backends/__pycache__/test_couchbase.cpython-36.pyc,, +celery/tests/backends/__pycache__/test_mongodb.cpython-36.pyc,, +celery/tests/backends/__pycache__/test_database.cpython-36.pyc,, +celery/tests/backends/__pycache__/test_amqp.cpython-36.pyc,, +celery/tests/backends/__pycache__/test_rpc.cpython-36.pyc,, +celery/tests/backends/__pycache__/__init__.cpython-36.pyc,, +celery/tests/backends/__pycache__/test_cassandra.cpython-36.pyc,, +celery/tests/compat_modules/__pycache__/test_decorators.cpython-36.pyc,, +celery/tests/compat_modules/__pycache__/test_sets.cpython-36.pyc,, +celery/tests/compat_modules/__pycache__/test_http.cpython-36.pyc,, 
+celery/tests/compat_modules/__pycache__/test_compat.cpython-36.pyc,, +celery/tests/compat_modules/__pycache__/test_messaging.cpython-36.pyc,, +celery/tests/compat_modules/__pycache__/test_compat_utils.cpython-36.pyc,, +celery/tests/compat_modules/__pycache__/__init__.cpython-36.pyc,, +celery/tests/utils/__pycache__/test_pickle.cpython-36.pyc,, +celery/tests/utils/__pycache__/test_local.cpython-36.pyc,, +celery/tests/utils/__pycache__/test_utils.cpython-36.pyc,, +celery/tests/utils/__pycache__/test_imports.cpython-36.pyc,, +celery/tests/utils/__pycache__/test_sysinfo.cpython-36.pyc,, +celery/tests/utils/__pycache__/test_platforms.cpython-36.pyc,, +celery/tests/utils/__pycache__/test_serialization.cpython-36.pyc,, +celery/tests/utils/__pycache__/test_saferef.cpython-36.pyc,, +celery/tests/utils/__pycache__/test_timeutils.cpython-36.pyc,, +celery/tests/utils/__pycache__/test_text.cpython-36.pyc,, +celery/tests/utils/__pycache__/test_datastructures.cpython-36.pyc,, +celery/tests/utils/__pycache__/test_encoding.cpython-36.pyc,, +celery/tests/utils/__pycache__/test_timer2.cpython-36.pyc,, +celery/tests/utils/__pycache__/test_term.cpython-36.pyc,, +celery/tests/utils/__pycache__/__init__.cpython-36.pyc,, +celery/tests/utils/__pycache__/test_dispatcher.cpython-36.pyc,, +celery/tests/utils/__pycache__/test_mail.cpython-36.pyc,, +celery/tests/utils/__pycache__/test_functional.cpython-36.pyc,, +celery/tests/utils/__pycache__/test_threads.cpython-36.pyc,, +celery/tests/__pycache__/case.cpython-36.pyc,, +celery/tests/__pycache__/__init__.cpython-36.pyc,, +celery/tests/contrib/__pycache__/test_migrate.cpython-36.pyc,, +celery/tests/contrib/__pycache__/test_rdb.cpython-36.pyc,, +celery/tests/contrib/__pycache__/test_abortable.cpython-36.pyc,, +celery/tests/contrib/__pycache__/test_methods.cpython-36.pyc,, +celery/tests/contrib/__pycache__/__init__.cpython-36.pyc,, +celery/tests/concurrency/__pycache__/test_solo.cpython-36.pyc,, +celery/tests/concurrency/__pycache__/test_gevent.cpython-36.pyc,, +celery/tests/concurrency/__pycache__/test_concurrency.cpython-36.pyc,, +celery/tests/concurrency/__pycache__/test_eventlet.cpython-36.pyc,, +celery/tests/concurrency/__pycache__/test_pool.cpython-36.pyc,, +celery/tests/concurrency/__pycache__/__init__.cpython-36.pyc,, +celery/tests/concurrency/__pycache__/test_prefork.cpython-36.pyc,, +celery/tests/concurrency/__pycache__/test_threads.cpython-36.pyc,, +celery/tests/fixups/__pycache__/test_django.cpython-36.pyc,, +celery/tests/fixups/__pycache__/__init__.cpython-36.pyc,, +celery/tests/worker/__pycache__/test_consumer.cpython-36.pyc,, +celery/tests/worker/__pycache__/test_request.cpython-36.pyc,, +celery/tests/worker/__pycache__/test_autoreload.cpython-36.pyc,, +celery/tests/worker/__pycache__/test_worker.cpython-36.pyc,, +celery/tests/worker/__pycache__/test_control.cpython-36.pyc,, +celery/tests/worker/__pycache__/test_components.cpython-36.pyc,, +celery/tests/worker/__pycache__/test_revoke.cpython-36.pyc,, +celery/tests/worker/__pycache__/test_hub.cpython-36.pyc,, +celery/tests/worker/__pycache__/test_bootsteps.cpython-36.pyc,, +celery/tests/worker/__pycache__/test_autoscale.cpython-36.pyc,, +celery/tests/worker/__pycache__/test_state.cpython-36.pyc,, +celery/tests/worker/__pycache__/__init__.cpython-36.pyc,, +celery/tests/worker/__pycache__/test_heartbeat.cpython-36.pyc,, +celery/tests/worker/__pycache__/test_loops.cpython-36.pyc,, +celery/tests/worker/__pycache__/test_strategy.cpython-36.pyc,, +celery/tests/events/__pycache__/test_cursesmon.cpython-36.pyc,, 
+celery/tests/events/__pycache__/test_events.cpython-36.pyc,, +celery/tests/events/__pycache__/test_snapshot.cpython-36.pyc,, +celery/tests/events/__pycache__/test_state.cpython-36.pyc,, +celery/tests/events/__pycache__/__init__.cpython-36.pyc,, +celery/tests/functional/__pycache__/tasks.cpython-36.pyc,, +celery/tests/functional/__pycache__/case.cpython-36.pyc,, +celery/tests/functional/__pycache__/__init__.cpython-36.pyc,, +celery/utils/dispatch/__pycache__/saferef.cpython-36.pyc,, +celery/utils/dispatch/__pycache__/signal.cpython-36.pyc,, +celery/utils/dispatch/__pycache__/__init__.cpython-36.pyc,, +celery/utils/__pycache__/timer2.cpython-36.pyc,, +celery/utils/__pycache__/debug.cpython-36.pyc,, +celery/utils/__pycache__/sysinfo.cpython-36.pyc,, +celery/utils/__pycache__/term.cpython-36.pyc,, +celery/utils/__pycache__/imports.cpython-36.pyc,, +celery/utils/__pycache__/mail.cpython-36.pyc,, +celery/utils/__pycache__/functional.cpython-36.pyc,, +celery/utils/__pycache__/timeutils.cpython-36.pyc,, +celery/utils/__pycache__/objects.cpython-36.pyc,, +celery/utils/__pycache__/text.cpython-36.pyc,, +celery/utils/__pycache__/encoding.cpython-36.pyc,, +celery/utils/__pycache__/compat.cpython-36.pyc,, +celery/utils/__pycache__/log.cpython-36.pyc,, +celery/utils/__pycache__/threads.cpython-36.pyc,, +celery/utils/__pycache__/iso8601.cpython-36.pyc,, +celery/utils/__pycache__/serialization.cpython-36.pyc,, +celery/utils/__pycache__/__init__.cpython-36.pyc,, +celery/__pycache__/beat.cpython-36.pyc,, +celery/__pycache__/schedules.cpython-36.pyc,, +celery/__pycache__/exceptions.cpython-36.pyc,, +celery/__pycache__/datastructures.cpython-36.pyc,, +celery/__pycache__/result.cpython-36.pyc,, +celery/__pycache__/signals.cpython-36.pyc,, +celery/__pycache__/_state.cpython-36.pyc,, +celery/__pycache__/__main__.cpython-36.pyc,, +celery/__pycache__/canvas.cpython-36.pyc,, +celery/__pycache__/five.cpython-36.pyc,, +celery/__pycache__/local.cpython-36.pyc,, +celery/__pycache__/bootsteps.cpython-36.pyc,, +celery/__pycache__/platforms.cpython-36.pyc,, +celery/__pycache__/states.cpython-36.pyc,, +celery/__pycache__/__init__.cpython-36.pyc,, +celery/contrib/__pycache__/rdb.cpython-36.pyc,, +celery/contrib/__pycache__/migrate.cpython-36.pyc,, +celery/contrib/__pycache__/abortable.cpython-36.pyc,, +celery/contrib/__pycache__/batches.cpython-36.pyc,, +celery/contrib/__pycache__/methods.cpython-36.pyc,, +celery/contrib/__pycache__/sphinx.cpython-36.pyc,, +celery/contrib/__pycache__/__init__.cpython-36.pyc,, +celery/concurrency/__pycache__/asynpool.cpython-36.pyc,, +celery/concurrency/__pycache__/gevent.cpython-36.pyc,, +celery/concurrency/__pycache__/base.cpython-36.pyc,, +celery/concurrency/__pycache__/threads.cpython-36.pyc,, +celery/concurrency/__pycache__/prefork.cpython-36.pyc,, +celery/concurrency/__pycache__/eventlet.cpython-36.pyc,, +celery/concurrency/__pycache__/__init__.cpython-36.pyc,, +celery/concurrency/__pycache__/solo.cpython-36.pyc,, +celery/task/__pycache__/trace.cpython-36.pyc,, +celery/task/__pycache__/sets.cpython-36.pyc,, +celery/task/__pycache__/base.cpython-36.pyc,, +celery/task/__pycache__/http.cpython-36.pyc,, +celery/task/__pycache__/__init__.cpython-36.pyc,, +celery/fixups/__pycache__/django.cpython-36.pyc,, +celery/fixups/__pycache__/__init__.cpython-36.pyc,, +celery/worker/__pycache__/heartbeat.cpython-36.pyc,, +celery/worker/__pycache__/autoscale.cpython-36.pyc,, +celery/worker/__pycache__/strategy.cpython-36.pyc,, +celery/worker/__pycache__/request.cpython-36.pyc,, 
+celery/worker/__pycache__/job.cpython-36.pyc,, +celery/worker/__pycache__/state.cpython-36.pyc,, +celery/worker/__pycache__/control.cpython-36.pyc,, +celery/worker/__pycache__/pidbox.cpython-36.pyc,, +celery/worker/__pycache__/loops.cpython-36.pyc,, +celery/worker/__pycache__/components.cpython-36.pyc,, +celery/worker/__pycache__/consumer.cpython-36.pyc,, +celery/worker/__pycache__/autoreload.cpython-36.pyc,, +celery/worker/__pycache__/__init__.cpython-36.pyc,, +celery/events/__pycache__/state.cpython-36.pyc,, +celery/events/__pycache__/cursesmon.cpython-36.pyc,, +celery/events/__pycache__/__init__.cpython-36.pyc,, +celery/events/__pycache__/dumper.cpython-36.pyc,, +celery/events/__pycache__/snapshot.cpython-36.pyc,, +celery/apps/__pycache__/beat.cpython-36.pyc,, +celery/apps/__pycache__/__init__.cpython-36.pyc,, +celery/apps/__pycache__/worker.cpython-36.pyc,, +celery/loaders/__pycache__/app.cpython-36.pyc,, +celery/loaders/__pycache__/default.cpython-36.pyc,, +celery/loaders/__pycache__/base.cpython-36.pyc,, +celery/loaders/__pycache__/__init__.cpython-36.pyc,, diff --git a/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/WHEEL b/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/WHEEL new file mode 100644 index 0000000..7332a41 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.30.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/entry_points.txt b/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/entry_points.txt new file mode 100644 index 0000000..26ac737 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/entry_points.txt @@ -0,0 +1,6 @@ +[console_scripts] +celery = celery.__main__:main +celerybeat = celery.__main__:_compat_beat +celeryd = celery.__main__:_compat_worker +celeryd-multi = celery.__main__:_compat_multi + diff --git a/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/metadata.json b/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/metadata.json new file mode 100644 index 0000000..5506506 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["Development Status :: 5 - Production/Stable", "License :: OSI Approved :: BSD License", "Topic :: System :: Distributed Computing", "Topic :: Software Development :: Object Brokering", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Programming Language :: Python :: Implementation :: Jython", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: Microsoft :: Windows", "Operating System :: MacOS :: MacOS X"], "description_content_type": "UNKNOWN", "extensions": {"python.commands": {"wrap_console": {"celery": "celery.__main__:main", "celerybeat": "celery.__main__:_compat_beat", "celeryd": "celery.__main__:_compat_worker", "celeryd-multi": "celery.__main__:_compat_multi"}}, "python.details": {"contacts": [{"email": "ask@celeryproject.org", "name": "Ask 
Solem", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://celeryproject.org"}}, "python.exports": {"console_scripts": {"celery": "celery.__main__:main", "celerybeat": "celery.__main__:_compat_beat", "celeryd": "celery.__main__:_compat_worker", "celeryd-multi": "celery.__main__:_compat_multi"}}}, "extras": ["auth", "beanstalk", "cassandra", "couchbase", "couchdb", "eventlet", "gevent", "librabbitmq", "memcache", "mongodb", "msgpack", "pyro", "redis", "slmq", "sqlalchemy", "sqs", "threads", "yaml", "zeromq", "zookeeper"], "generator": "bdist_wheel (0.30.0)", "license": "BSD", "metadata_version": "2.0", "name": "celery", "platform": "any", "run_requires": [{"extra": "yaml", "requires": ["PyYAML (>=3.10)"]}, {"extra": "beanstalk", "requires": ["beanstalkc"]}, {"requires": ["billiard (<3.4,>=3.3.0.23)", "kombu (<3.1,>=3.0.37)", "pytz (>dev)"]}, {"extra": "sqs", "requires": ["boto (>=2.13.3)"]}, {"extra": "couchbase", "requires": ["couchbase"]}, {"extra": "couchdb", "requires": ["couchdb"]}, {"extra": "eventlet", "requires": ["eventlet"]}, {"extra": "gevent", "requires": ["gevent"]}, {"extra": "zookeeper", "requires": ["kazoo (>=1.3.1)"]}, {"extra": "librabbitmq", "requires": ["librabbitmq (>=1.6.1)"]}, {"extra": "msgpack", "requires": ["msgpack-python (>=0.3.0)"]}, {"extra": "auth", "requires": ["pyOpenSSL"]}, {"extra": "cassandra", "requires": ["pycassa"]}, {"extra": "memcache", "requires": ["pylibmc"]}, {"extra": "mongodb", "requires": ["pymongo (>=2.6.2)"]}, {"extra": "pyro", "requires": ["pyro4"]}, {"extra": "zeromq", "requires": ["pyzmq (>=13.1.0)"]}, {"extra": "redis", "requires": ["redis (>=2.8.0)"]}, {"extra": "slmq", "requires": ["softlayer-messaging (>=1.0.3)"]}, {"extra": "sqlalchemy", "requires": ["sqlalchemy"]}, {"extra": "threads", "requires": ["threadpool"]}], "summary": "Distributed Task Queue", "test_requires": [{"requires": ["mock (>=1.0.1)", "nose", "unittest2 (>=0.5.1)"]}], "version": "3.1.26.post2"} \ No newline at end of file diff --git a/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/top_level.txt b/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/top_level.txt new file mode 100644 index 0000000..74f9e8f --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery-3.1.26.post2.dist-info/top_level.txt @@ -0,0 +1 @@ +celery diff --git a/thesisenv/lib/python3.6/site-packages/celery/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/__init__.py new file mode 100644 index 0000000..ba5f057 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/__init__.py @@ -0,0 +1,155 @@ +# -*- coding: utf-8 -*- +"""Distributed Task Queue""" +# :copyright: (c) 2015 Ask Solem and individual contributors. +# All rights # reserved. +# :copyright: (c) 2012-2014 GoPivotal, Inc., All rights reserved. +# :copyright: (c) 2009 - 2012 Ask Solem and individual contributors, +# All rights reserved. +# :license: BSD (3 Clause), see LICENSE for more details. 
+ +from __future__ import absolute_import + +import os +import sys + +from collections import namedtuple + +version_info_t = namedtuple( + 'version_info_t', ('major', 'minor', 'micro', 'releaselevel', 'serial'), +) + +SERIES = 'Cipater' +VERSION = version_info_t(3, 1, 26, '.post2', '') +__version__ = '{0.major}.{0.minor}.{0.micro}{0.releaselevel}'.format(VERSION) +__author__ = 'Ask Solem' +__contact__ = 'ask@celeryproject.org' +__homepage__ = 'http://celeryproject.org' +__docformat__ = 'restructuredtext' +__all__ = [ + 'Celery', 'bugreport', 'shared_task', 'task', + 'current_app', 'current_task', 'maybe_signature', + 'chain', 'chord', 'chunks', 'group', 'signature', + 'xmap', 'xstarmap', 'uuid', 'version', '__version__', +] +VERSION_BANNER = '{0} ({1})'.format(__version__, SERIES) + +# -eof meta- + +if os.environ.get('C_IMPDEBUG'): # pragma: no cover + from .five import builtins + real_import = builtins.__import__ + + def debug_import(name, locals=None, globals=None, + fromlist=None, level=-1): + glob = globals or getattr(sys, 'emarfteg_'[::-1])(1).f_globals + importer_name = glob and glob.get('__name__') or 'unknown' + print('-- {0} imports {1}'.format(importer_name, name)) + return real_import(name, locals, globals, fromlist, level) + builtins.__import__ = debug_import + +# This is never executed, but tricks static analyzers (PyDev, PyCharm, +# pylint, etc.) into knowing the types of these symbols, and what +# they contain. +STATICA_HACK = True +globals()['kcah_acitats'[::-1].upper()] = False +if STATICA_HACK: # pragma: no cover + from celery.app import shared_task # noqa + from celery.app.base import Celery # noqa + from celery.app.utils import bugreport # noqa + from celery.app.task import Task # noqa + from celery._state import current_app, current_task # noqa + from celery.canvas import ( # noqa + chain, chord, chunks, group, + signature, maybe_signature, xmap, xstarmap, subtask, + ) + from celery.utils import uuid # noqa + +# Eventlet/gevent patching must happen before importing +# anything else, so these tools must be at top-level. + + +def _find_option_with_arg(argv, short_opts=None, long_opts=None): + """Search argv for option specifying its short and longopt + alternatives. + + Return the value of the option if found. + + """ + for i, arg in enumerate(argv): + if arg.startswith('-'): + if long_opts and arg.startswith('--'): + name, _, val = arg.partition('=') + if name in long_opts: + return val + if short_opts and arg in short_opts: + return argv[i + 1] + raise KeyError('|'.join(short_opts or [] + long_opts or [])) + + +def _patch_eventlet(): + import eventlet + import eventlet.debug + eventlet.monkey_patch() + EVENTLET_DBLOCK = int(os.environ.get('EVENTLET_NOBLOCK', 0)) + if EVENTLET_DBLOCK: + eventlet.debug.hub_blocking_detection(EVENTLET_DBLOCK) + + +def _patch_gevent(): + from gevent import monkey, version_info + monkey.patch_all() + if version_info[0] == 0: # pragma: no cover + # Signals aren't working in gevent versions <1.0, + # and are not monkey patched by patch_all() + from gevent import signal as _gevent_signal + _signal = __import__('signal') + _signal.signal = _gevent_signal + + +def maybe_patch_concurrency(argv=sys.argv, + short_opts=['-P'], long_opts=['--pool'], + patches={'eventlet': _patch_eventlet, + 'gevent': _patch_gevent}): + """With short and long opt alternatives that specify the command line + option to set the pool, this makes sure that anything that needs + to be patched is completed as early as possible. + (e.g. 
eventlet/gevent monkey patches).""" + try: + pool = _find_option_with_arg(argv, short_opts, long_opts) + except KeyError: + pass + else: + try: + patcher = patches[pool] + except KeyError: + pass + else: + patcher() + # set up eventlet/gevent environments ASAP. + from celery import concurrency + concurrency.get_implementation(pool) + +# Lazy loading +from celery import five # noqa + +old_module, new_module = five.recreate_module( # pragma: no cover + __name__, + by_module={ + 'celery.app': ['Celery', 'bugreport', 'shared_task'], + 'celery.app.task': ['Task'], + 'celery._state': ['current_app', 'current_task'], + 'celery.canvas': ['chain', 'chord', 'chunks', 'group', + 'signature', 'maybe_signature', 'subtask', + 'xmap', 'xstarmap'], + 'celery.utils': ['uuid'], + }, + direct={'task': 'celery.task'}, + __package__='celery', __file__=__file__, + __path__=__path__, __doc__=__doc__, __version__=__version__, + __author__=__author__, __contact__=__contact__, + __homepage__=__homepage__, __docformat__=__docformat__, five=five, + VERSION=VERSION, SERIES=SERIES, VERSION_BANNER=VERSION_BANNER, + version_info_t=version_info_t, + maybe_patch_concurrency=maybe_patch_concurrency, + _find_option_with_arg=_find_option_with_arg, +) diff --git a/thesisenv/lib/python3.6/site-packages/celery/__main__.py b/thesisenv/lib/python3.6/site-packages/celery/__main__.py new file mode 100644 index 0000000..04448e2 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/__main__.py @@ -0,0 +1,54 @@ +from __future__ import absolute_import + +import sys + +from os.path import basename + +from . import maybe_patch_concurrency + +__all__ = ['main'] + +DEPRECATED_FMT = """ +The {old!r} command is deprecated, please use {new!r} instead: + +$ {new_argv} + +""" + + +def _warn_deprecated(new): + print(DEPRECATED_FMT.format( + old=basename(sys.argv[0]), new=new, + new_argv=' '.join([new] + sys.argv[1:])), + ) + + +def main(): + if 'multi' not in sys.argv: + maybe_patch_concurrency() + from celery.bin.celery import main + main() + + +def _compat_worker(): + maybe_patch_concurrency() + _warn_deprecated('celery worker') + from celery.bin.worker import main + main() + + +def _compat_multi(): + _warn_deprecated('celery multi') + from celery.bin.multi import main + main() + + +def _compat_beat(): + maybe_patch_concurrency() + _warn_deprecated('celery beat') + from celery.bin.beat import main + main() + + +if __name__ == '__main__': # pragma: no cover + main() diff --git a/thesisenv/lib/python3.6/site-packages/celery/_state.py b/thesisenv/lib/python3.6/site-packages/celery/_state.py new file mode 100644 index 0000000..755bb92 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/_state.py @@ -0,0 +1,159 @@ +# -*- coding: utf-8 -*- +""" + celery._state + ~~~~~~~~~~~~~~~ + + This is an internal module containing thread state + like the ``current_app``, and ``current_task``. + + This module shouldn't be used directly. 
+ +""" +from __future__ import absolute_import, print_function + +import os +import sys +import threading +import weakref + +from celery.local import Proxy +from celery.utils.threads import LocalStack + +try: + from weakref import WeakSet as AppSet +except ImportError: # XXX Py2.6 + + class AppSet(object): # noqa + + def __init__(self): + self._refs = set() + + def add(self, app): + self._refs.add(weakref.ref(app)) + + def __iter__(self): + dirty = [] + try: + for appref in self._refs: + app = appref() + if app is None: + dirty.append(appref) + else: + yield app + finally: + while dirty: + self._refs.discard(dirty.pop()) + +__all__ = ['set_default_app', 'get_current_app', 'get_current_task', + 'get_current_worker_task', 'current_app', 'current_task', + 'connect_on_app_finalize'] + +#: Global default app used when no current app. +default_app = None + +#: List of all app instances (weakrefs), must not be used directly. +_apps = AppSet() + +#: global set of functions to call whenever a new app is finalized +#: E.g. Shared tasks, and builtin tasks are created +#: by adding callbacks here. +_on_app_finalizers = set() + +_task_join_will_block = False + + +def connect_on_app_finalize(callback): + _on_app_finalizers.add(callback) + return callback + + +def _announce_app_finalized(app): + callbacks = set(_on_app_finalizers) + for callback in callbacks: + callback(app) + + +def _set_task_join_will_block(blocks): + global _task_join_will_block + _task_join_will_block = blocks + + +def task_join_will_block(): + return _task_join_will_block + + +class _TLS(threading.local): + #: Apps with the :attr:`~celery.app.base.BaseApp.set_as_current` attribute + #: sets this, so it will always contain the last instantiated app, + #: and is the default app returned by :func:`app_or_default`. + current_app = None +_tls = _TLS() + +_task_stack = LocalStack() + + +def set_default_app(app): + global default_app + default_app = app + + +def _get_current_app(): + if default_app is None: + #: creates the global fallback app instance. + from celery.app import Celery + set_default_app(Celery( + 'default', + loader=os.environ.get('CELERY_LOADER') or 'default', + fixups=[], + set_as_current=False, accept_magic_kwargs=True, + )) + return _tls.current_app or default_app + + +def _set_current_app(app): + _tls.current_app = app + + +C_STRICT_APP = os.environ.get('C_STRICT_APP') +if os.environ.get('C_STRICT_APP'): # pragma: no cover + def get_current_app(): + raise Exception('USES CURRENT APP') + import traceback + print('-- USES CURRENT_APP', file=sys.stderr) # noqa+ + traceback.print_stack(file=sys.stderr) + return _get_current_app() +else: + get_current_app = _get_current_app + + +def get_current_task(): + """Currently executing task.""" + return _task_stack.top + + +def get_current_worker_task(): + """Currently executing task, that was applied by the worker. + + This is used to differentiate between the actual task + executed by the worker and any task that was called within + a task (using ``task.__call__`` or ``task.apply``) + + """ + for task in reversed(_task_stack.stack): + if not task.request.called_directly: + return task + + +#: Proxy to current app. +current_app = Proxy(get_current_app) + +#: Proxy to current task. 
+current_task = Proxy(get_current_task) + + +def _register_app(app): + _apps.add(app) + + +def _get_active_apps(): + return _apps diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/app/__init__.py new file mode 100644 index 0000000..952a874 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/app/__init__.py @@ -0,0 +1,150 @@ +# -*- coding: utf-8 -*- +""" + celery.app + ~~~~~~~~~~ + + Celery Application. + +""" +from __future__ import absolute_import + +import os + +from celery.local import Proxy +from celery import _state +from celery._state import ( + get_current_app as current_app, + get_current_task as current_task, + connect_on_app_finalize, set_default_app, _get_active_apps, _task_stack, +) +from celery.utils import gen_task_name + +from .base import Celery, AppPickler + +__all__ = ['Celery', 'AppPickler', 'default_app', 'app_or_default', + 'bugreport', 'enable_trace', 'disable_trace', 'shared_task', + 'set_default_app', 'current_app', 'current_task', + 'push_current_task', 'pop_current_task'] + +#: Proxy always returning the app set as default. +default_app = Proxy(lambda: _state.default_app) + +#: Function returning the app provided or the default app if none. +#: +#: The environment variable :envvar:`CELERY_TRACE_APP` is used to +#: trace app leaks. When enabled an exception is raised if there +#: is no active app. +app_or_default = None + +#: The 'default' loader is the default loader used by old applications. +#: This is deprecated and should no longer be used as it's set too early +#: to be affected by --loader argument. +default_loader = os.environ.get('CELERY_LOADER') or 'default' # XXX + + +#: Function used to push a task to the thread local stack +#: keeping track of the currently executing task. +#: You must remember to pop the task after. +push_current_task = _task_stack.push + +#: Function used to pop a task from the thread local stack +#: keeping track of the currently executing task. +pop_current_task = _task_stack.pop + + +def bugreport(app=None): + return (app or current_app()).bugreport() + + +def _app_or_default(app=None): + if app is None: + return _state.get_current_app() + return app + + +def _app_or_default_trace(app=None): # pragma: no cover + from traceback import print_stack + from billiard import current_process + if app is None: + if getattr(_state._tls, 'current_app', None): + print('-- RETURNING TO CURRENT APP --') # noqa+ + print_stack() + return _state._tls.current_app + if current_process()._name == 'MainProcess': + raise Exception('DEFAULT APP') + print('-- RETURNING TO DEFAULT APP --') # noqa+ + print_stack() + return _state.default_app + return app + + +def enable_trace(): + global app_or_default + app_or_default = _app_or_default_trace + + +def disable_trace(): + global app_or_default + app_or_default = _app_or_default + +if os.environ.get('CELERY_TRACE_APP'): # pragma: no cover + enable_trace() +else: + disable_trace() + +App = Celery # XXX Compat + + +def shared_task(*args, **kwargs): + """Create shared tasks (decorator). + Will return a proxy that always takes the task from the current apps + task registry. + + This can be used by library authors to create tasks that will work + for any app environment. + + Example: + + >>> from celery import Celery, shared_task + >>> @shared_task + ... def add(x, y): + ... 
return x + y + + >>> app1 = Celery(broker='amqp://') + >>> add.app is app1 + True + + >>> app2 = Celery(broker='redis://') + >>> add.app is app2 + + """ + + def create_shared_task(**options): + + def __inner(fun): + name = options.get('name') + # Set as shared task so that unfinalized apps, + # and future apps will load the task. + connect_on_app_finalize( + lambda app: app._task_from_fun(fun, **options) + ) + + # Force all finalized apps to take this task as well. + for app in _get_active_apps(): + if app.finalized: + with app._finalize_mutex: + app._task_from_fun(fun, **options) + + # Return a proxy that always gets the task from the current + # apps task registry. + def task_by_cons(): + app = current_app() + return app.tasks[ + name or gen_task_name(app, fun.__name__, fun.__module__) + ] + return Proxy(task_by_cons) + return __inner + + if len(args) == 1 and callable(args[0]): + return create_shared_task(**kwargs)(args[0]) + return create_shared_task(*args, **kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/amqp.py b/thesisenv/lib/python3.6/site-packages/celery/app/amqp.py new file mode 100644 index 0000000..27838c2 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/app/amqp.py @@ -0,0 +1,512 @@ +# -*- coding: utf-8 -*- +""" + celery.app.amqp + ~~~~~~~~~~~~~~~ + + Sending and receiving messages using Kombu. + +""" +from __future__ import absolute_import + +import numbers + +from datetime import timedelta +from weakref import WeakValueDictionary + +from kombu import Connection, Consumer, Exchange, Producer, Queue +from kombu.common import Broadcast +from kombu.pools import ProducerPool +from kombu.utils import cached_property, uuid +from kombu.utils.encoding import safe_repr +from kombu.utils.functional import maybe_list + +from celery import signals +from celery.five import items, string_t +from celery.utils.text import indent as textindent +from celery.utils.timeutils import to_utc + +from . import app_or_default +from . import routes as _routes + +__all__ = ['AMQP', 'Queues', 'TaskProducer', 'TaskConsumer'] + +#: earliest date supported by time.mktime. +INT_MIN = -2147483648 + +#: Human readable queue declaration. +QUEUE_FORMAT = """ +.> {0.name:<16} exchange={0.exchange.name}({0.exchange.type}) \ +key={0.routing_key} +""" + + +class Queues(dict): + """Queue name⇒ declaration mapping. + + :param queues: Initial list/tuple or dict of queues. + :keyword create_missing: By default any unknown queues will be + added automatically, but if disabled + the occurrence of unknown queues + in `wanted` will raise :exc:`KeyError`. + :keyword ha_policy: Default HA policy for queues with none set. + + + """ + #: If set, this is a subset of queues to consume from. + #: The rest of the queues are then used for routing only. 
+ _consume_from = None + + def __init__(self, queues=None, default_exchange=None, + create_missing=True, ha_policy=None, autoexchange=None): + dict.__init__(self) + self.aliases = WeakValueDictionary() + self.default_exchange = default_exchange + self.create_missing = create_missing + self.ha_policy = ha_policy + self.autoexchange = Exchange if autoexchange is None else autoexchange + if isinstance(queues, (tuple, list)): + queues = dict((q.name, q) for q in queues) + for name, q in items(queues or {}): + self.add(q) if isinstance(q, Queue) else self.add_compat(name, **q) + + def __getitem__(self, name): + try: + return self.aliases[name] + except KeyError: + return dict.__getitem__(self, name) + + def __setitem__(self, name, queue): + if self.default_exchange and (not queue.exchange or + not queue.exchange.name): + queue.exchange = self.default_exchange + dict.__setitem__(self, name, queue) + if queue.alias: + self.aliases[queue.alias] = queue + + def __missing__(self, name): + if self.create_missing: + return self.add(self.new_missing(name)) + raise KeyError(name) + + def add(self, queue, **kwargs): + """Add new queue. + + The first argument can either be a :class:`kombu.Queue` instance, + or the name of a queue. If the former the rest of the keyword + arguments are ignored, and options are simply taken from the queue + instance. + + :param queue: :class:`kombu.Queue` instance or name of the queue. + :keyword exchange: (if named) specifies exchange name. + :keyword routing_key: (if named) specifies binding key. + :keyword exchange_type: (if named) specifies type of exchange. + :keyword \*\*options: (if named) Additional declaration options. + + """ + if not isinstance(queue, Queue): + return self.add_compat(queue, **kwargs) + if self.ha_policy: + if queue.queue_arguments is None: + queue.queue_arguments = {} + self._set_ha_policy(queue.queue_arguments) + self[queue.name] = queue + return queue + + def add_compat(self, name, **options): + # docs used to use binding_key as routing key + options.setdefault('routing_key', options.get('binding_key')) + if options['routing_key'] is None: + options['routing_key'] = name + if self.ha_policy is not None: + self._set_ha_policy(options.setdefault('queue_arguments', {})) + q = self[name] = Queue.from_dict(name, **options) + return q + + def _set_ha_policy(self, args): + policy = self.ha_policy + if isinstance(policy, (list, tuple)): + return args.update({'x-ha-policy': 'nodes', + 'x-ha-policy-params': list(policy)}) + args['x-ha-policy'] = policy + + def format(self, indent=0, indent_first=True): + """Format routing table into string for log dumps.""" + active = self.consume_from + if not active: + return '' + info = [QUEUE_FORMAT.strip().format(q) + for _, q in sorted(items(active))] + if indent_first: + return textindent('\n'.join(info), indent) + return info[0] + '\n' + textindent('\n'.join(info[1:]), indent) + + def select_add(self, queue, **kwargs): + """Add new task queue that will be consumed from even when + a subset has been selected using the :option:`-Q` option.""" + q = self.add(queue, **kwargs) + if self._consume_from is not None: + self._consume_from[q.name] = q + return q + + def select(self, include): + """Sets :attr:`consume_from` by selecting a subset of the + currently defined queues. + + :param include: Names of queues to consume from. + Can be iterable or string. 
+ """ + if include: + self._consume_from = dict((name, self[name]) + for name in maybe_list(include)) + select_subset = select # XXX compat + + def deselect(self, exclude): + """Deselect queues so that they will not be consumed from. + + :param exclude: Names of queues to avoid consuming from. + Can be iterable or string. + + """ + if exclude: + exclude = maybe_list(exclude) + if self._consume_from is None: + # using selection + return self.select(k for k in self if k not in exclude) + # using all queues + for queue in exclude: + self._consume_from.pop(queue, None) + select_remove = deselect # XXX compat + + def new_missing(self, name): + return Queue(name, self.autoexchange(name), name) + + @property + def consume_from(self): + if self._consume_from is not None: + return self._consume_from + return self + + +class TaskProducer(Producer): + app = None + auto_declare = False + retry = False + retry_policy = None + utc = True + event_dispatcher = None + send_sent_event = False + + def __init__(self, channel=None, exchange=None, *args, **kwargs): + self.retry = kwargs.pop('retry', self.retry) + self.retry_policy = kwargs.pop('retry_policy', + self.retry_policy or {}) + self.send_sent_event = kwargs.pop('send_sent_event', + self.send_sent_event) + exchange = exchange or self.exchange + self.queues = self.app.amqp.queues # shortcut + self.default_queue = self.app.amqp.default_queue + self._default_mode = self.app.conf.CELERY_DEFAULT_DELIVERY_MODE + super(TaskProducer, self).__init__(channel, exchange, *args, **kwargs) + + def publish_task(self, task_name, task_args=None, task_kwargs=None, + countdown=None, eta=None, task_id=None, group_id=None, + taskset_id=None, # compat alias to group_id + expires=None, exchange=None, exchange_type=None, + event_dispatcher=None, retry=None, retry_policy=None, + queue=None, now=None, retries=0, chord=None, + callbacks=None, errbacks=None, routing_key=None, + serializer=None, delivery_mode=None, compression=None, + reply_to=None, time_limit=None, soft_time_limit=None, + declare=None, headers=None, + send_before_publish=signals.before_task_publish.send, + before_receivers=signals.before_task_publish.receivers, + send_after_publish=signals.after_task_publish.send, + after_receivers=signals.after_task_publish.receivers, + send_task_sent=signals.task_sent.send, # XXX deprecated + sent_receivers=signals.task_sent.receivers, + **kwargs): + """Send task message.""" + retry = self.retry if retry is None else retry + headers = {} if headers is None else headers + + qname = queue + if queue is None and exchange is None: + queue = self.default_queue + if queue is not None: + if isinstance(queue, string_t): + qname, queue = queue, self.queues[queue] + else: + qname = queue.name + exchange = exchange or queue.exchange.name + routing_key = routing_key or queue.routing_key + if declare is None and queue and not isinstance(queue, Broadcast): + declare = [queue] + if delivery_mode is None: + delivery_mode = self._default_mode + + # merge default and custom policy + retry = self.retry if retry is None else retry + _rp = (dict(self.retry_policy, **retry_policy) if retry_policy + else self.retry_policy) + task_id = task_id or uuid() + task_args = task_args or [] + task_kwargs = task_kwargs or {} + if not isinstance(task_args, (list, tuple)): + raise ValueError('task args must be a list or tuple') + if not isinstance(task_kwargs, dict): + raise ValueError('task kwargs must be a dictionary') + if countdown: # Convert countdown to ETA. 
+ self._verify_seconds(countdown, 'countdown') + now = now or self.app.now() + eta = now + timedelta(seconds=countdown) + if self.utc: + eta = to_utc(eta).astimezone(self.app.timezone) + if isinstance(expires, numbers.Real): + self._verify_seconds(expires, 'expires') + now = now or self.app.now() + expires = now + timedelta(seconds=expires) + if self.utc: + expires = to_utc(expires).astimezone(self.app.timezone) + eta = eta and eta.isoformat() + expires = expires and expires.isoformat() + + body = { + 'task': task_name, + 'id': task_id, + 'args': task_args, + 'kwargs': task_kwargs, + 'retries': retries or 0, + 'eta': eta, + 'expires': expires, + 'utc': self.utc, + 'callbacks': callbacks, + 'errbacks': errbacks, + 'timelimit': (time_limit, soft_time_limit), + 'taskset': group_id or taskset_id, + 'chord': chord, + } + + if before_receivers: + send_before_publish( + sender=task_name, body=body, + exchange=exchange, + routing_key=routing_key, + declare=declare, + headers=headers, + properties=kwargs, + retry_policy=retry_policy, + ) + + self.publish( + body, + exchange=exchange, routing_key=routing_key, + serializer=serializer or self.serializer, + compression=compression or self.compression, + headers=headers, + retry=retry, retry_policy=_rp, + reply_to=reply_to, + correlation_id=task_id, + delivery_mode=delivery_mode, declare=declare, + **kwargs + ) + + if after_receivers: + send_after_publish(sender=task_name, body=body, + exchange=exchange, routing_key=routing_key) + + if sent_receivers: # XXX deprecated + send_task_sent(sender=task_name, task_id=task_id, + task=task_name, args=task_args, + kwargs=task_kwargs, eta=eta, + taskset=group_id or taskset_id) + if self.send_sent_event: + evd = event_dispatcher or self.event_dispatcher + exname = exchange or self.exchange + if isinstance(exname, Exchange): + exname = exname.name + evd.publish( + 'task-sent', + { + 'uuid': task_id, + 'name': task_name, + 'args': safe_repr(task_args), + 'kwargs': safe_repr(task_kwargs), + 'retries': retries, + 'eta': eta, + 'expires': expires, + 'queue': qname, + 'exchange': exname, + 'routing_key': routing_key, + }, + self, retry=retry, retry_policy=retry_policy, + ) + return task_id + delay_task = publish_task # XXX Compat + + def _verify_seconds(self, s, what): + if s < INT_MIN: + raise ValueError('%s is out of range: %r' % (what, s)) + return s + + @cached_property + def event_dispatcher(self): + # We call Dispatcher.publish with a custom producer + # so don't need the dispatcher to be "enabled". 
+ return self.app.events.Dispatcher(enabled=False) + + +class TaskPublisher(TaskProducer): + """Deprecated version of :class:`TaskProducer`.""" + + def __init__(self, channel=None, exchange=None, *args, **kwargs): + self.app = app_or_default(kwargs.pop('app', self.app)) + self.retry = kwargs.pop('retry', self.retry) + self.retry_policy = kwargs.pop('retry_policy', + self.retry_policy or {}) + exchange = exchange or self.exchange + if not isinstance(exchange, Exchange): + exchange = Exchange(exchange, + kwargs.pop('exchange_type', 'direct')) + self.queues = self.app.amqp.queues # shortcut + super(TaskPublisher, self).__init__(channel, exchange, *args, **kwargs) + + +class TaskConsumer(Consumer): + app = None + + def __init__(self, channel, queues=None, app=None, accept=None, **kw): + self.app = app or self.app + if accept is None: + accept = self.app.conf.CELERY_ACCEPT_CONTENT + super(TaskConsumer, self).__init__( + channel, + queues or list(self.app.amqp.queues.consume_from.values()), + accept=accept, + **kw + ) + + +class AMQP(object): + Connection = Connection + Consumer = Consumer + + #: compat alias to Connection + BrokerConnection = Connection + + producer_cls = TaskProducer + consumer_cls = TaskConsumer + queues_cls = Queues + + #: Cached and prepared routing table. + _rtable = None + + #: Underlying producer pool instance automatically + #: set by the :attr:`producer_pool`. + _producer_pool = None + + # Exchange class/function used when defining automatic queues. + # E.g. you can use ``autoexchange = lambda n: None`` to use the + # amqp default exchange, which is a shortcut to bypass routing + # and instead send directly to the queue named in the routing key. + autoexchange = None + + def __init__(self, app): + self.app = app + + def flush_routes(self): + self._rtable = _routes.prepare(self.app.conf.CELERY_ROUTES) + + def Queues(self, queues, create_missing=None, ha_policy=None, + autoexchange=None): + """Create new :class:`Queues` instance, using queue defaults + from the current configuration.""" + conf = self.app.conf + if create_missing is None: + create_missing = conf.CELERY_CREATE_MISSING_QUEUES + if ha_policy is None: + ha_policy = conf.CELERY_QUEUE_HA_POLICY + if not queues and conf.CELERY_DEFAULT_QUEUE: + queues = (Queue(conf.CELERY_DEFAULT_QUEUE, + exchange=self.default_exchange, + routing_key=conf.CELERY_DEFAULT_ROUTING_KEY), ) + autoexchange = (self.autoexchange if autoexchange is None + else autoexchange) + return self.queues_cls( + queues, self.default_exchange, create_missing, + ha_policy, autoexchange, + ) + + def Router(self, queues=None, create_missing=None): + """Return the current task router.""" + return _routes.Router(self.routes, queues or self.queues, + self.app.either('CELERY_CREATE_MISSING_QUEUES', + create_missing), app=self.app) + + @cached_property + def TaskConsumer(self): + """Return consumer configured to consume from the queues + we are configured for (``app.amqp.queues.consume_from``).""" + return self.app.subclass_with_self(self.consumer_cls, + reverse='amqp.TaskConsumer') + get_task_consumer = TaskConsumer # XXX compat + + @cached_property + def TaskProducer(self): + """Return publisher used to send tasks. + + You should use `app.send_task` instead. 
+ + """ + conf = self.app.conf + return self.app.subclass_with_self( + self.producer_cls, + reverse='amqp.TaskProducer', + exchange=self.default_exchange, + routing_key=conf.CELERY_DEFAULT_ROUTING_KEY, + serializer=conf.CELERY_TASK_SERIALIZER, + compression=conf.CELERY_MESSAGE_COMPRESSION, + retry=conf.CELERY_TASK_PUBLISH_RETRY, + retry_policy=conf.CELERY_TASK_PUBLISH_RETRY_POLICY, + send_sent_event=conf.CELERY_SEND_TASK_SENT_EVENT, + utc=conf.CELERY_ENABLE_UTC, + ) + TaskPublisher = TaskProducer # compat + + @cached_property + def default_queue(self): + return self.queues[self.app.conf.CELERY_DEFAULT_QUEUE] + + @cached_property + def queues(self): + """Queue name⇒ declaration mapping.""" + return self.Queues(self.app.conf.CELERY_QUEUES) + + @queues.setter # noqa + def queues(self, queues): + return self.Queues(queues) + + @property + def routes(self): + if self._rtable is None: + self.flush_routes() + return self._rtable + + @cached_property + def router(self): + return self.Router() + + @property + def producer_pool(self): + if self._producer_pool is None: + self._producer_pool = ProducerPool( + self.app.pool, + limit=self.app.pool.limit, + Producer=self.TaskProducer, + ) + return self._producer_pool + publisher_pool = producer_pool # compat alias + + @cached_property + def default_exchange(self): + return Exchange(self.app.conf.CELERY_DEFAULT_EXCHANGE, + self.app.conf.CELERY_DEFAULT_EXCHANGE_TYPE) diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/annotations.py b/thesisenv/lib/python3.6/site-packages/celery/app/annotations.py new file mode 100644 index 0000000..27f436b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/app/annotations.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +""" + celery.app.annotations + ~~~~~~~~~~~~~~~~~~~~~~ + + Annotations is a nice term for monkey patching + task classes in the configuration. + + This prepares and performs the annotations in the + :setting:`CELERY_ANNOTATIONS` setting. + +""" +from __future__ import absolute_import + +from celery.five import string_t +from celery.utils.functional import firstmethod, mlazy +from celery.utils.imports import instantiate + +_first_match = firstmethod('annotate') +_first_match_any = firstmethod('annotate_any') + +__all__ = ['MapAnnotation', 'prepare', 'resolve_all'] + + +class MapAnnotation(dict): + + def annotate_any(self): + try: + return dict(self['*']) + except KeyError: + pass + + def annotate(self, task): + try: + return dict(self[task.name]) + except KeyError: + pass + + +def prepare(annotations): + """Expands the :setting:`CELERY_ANNOTATIONS` setting.""" + + def expand_annotation(annotation): + if isinstance(annotation, dict): + return MapAnnotation(annotation) + elif isinstance(annotation, string_t): + return mlazy(instantiate, annotation) + return annotation + + if annotations is None: + return () + elif not isinstance(annotations, (list, tuple)): + annotations = (annotations, ) + return [expand_annotation(anno) for anno in annotations] + + +def resolve_all(anno, task): + return (x for x in (_first_match(anno, task), _first_match_any(anno)) if x) diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/base.py b/thesisenv/lib/python3.6/site-packages/celery/app/base.py new file mode 100644 index 0000000..8f33c1b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/app/base.py @@ -0,0 +1,675 @@ +# -*- coding: utf-8 -*- +""" + celery.app.base + ~~~~~~~~~~~~~~~ + + Actual App instance implementation. 
+ +""" +from __future__ import absolute_import + +import os +import threading +import warnings + +from collections import defaultdict, deque +from copy import deepcopy +from operator import attrgetter + +from amqp import promise +from billiard.util import register_after_fork +from kombu.clocks import LamportClock +from kombu.common import oid_from +from kombu.utils import cached_property, uuid + +from celery import platforms +from celery import signals +from celery._state import ( + _task_stack, get_current_app, _set_current_app, set_default_app, + _register_app, get_current_worker_task, connect_on_app_finalize, + _announce_app_finalized, +) +from celery.exceptions import AlwaysEagerIgnored, ImproperlyConfigured +from celery.five import values +from celery.loaders import get_loader_cls +from celery.local import PromiseProxy, maybe_evaluate +from celery.utils.functional import first, maybe_list +from celery.utils.imports import instantiate, symbol_by_name +from celery.utils.objects import FallbackContext, mro_lookup + +from .annotations import prepare as prepare_annotations +from .defaults import DEFAULTS, find_deprecated_settings +from .registry import TaskRegistry +from .utils import ( + AppPickler, Settings, bugreport, _unpickle_app, _unpickle_app_v2, appstr, +) + +# Load all builtin tasks +from . import builtins # noqa + +__all__ = ['Celery'] + +_EXECV = os.environ.get('FORKED_BY_MULTIPROCESSING') +BUILTIN_FIXUPS = frozenset([ + 'celery.fixups.django:fixup', +]) + +ERR_ENVVAR_NOT_SET = """\ +The environment variable {0!r} is not set, +and as such the configuration could not be loaded. +Please set this variable and make it point to +a configuration module.""" + +_after_fork_registered = False + + +def app_has_custom(app, attr): + return mro_lookup(app.__class__, attr, stop=(Celery, object), + monkey_patched=[__name__]) + + +def _unpickle_appattr(reverse_name, args): + """Given an attribute name and a list of args, gets + the attribute from the current app and calls it.""" + return get_current_app()._rgetattr(reverse_name)(*args) + + +def _global_after_fork(obj): + # Previously every app would call: + # `register_after_fork(app, app._after_fork)` + # but this created a leak as `register_after_fork` stores concrete object + # references and once registered an object cannot be removed without + # touching and iterating over the private afterfork registry list. 
+ # + # See Issue #1949 + from celery import _state + from multiprocessing import util as mputil + for app in _state._apps: + try: + app._after_fork(obj) + except Exception as exc: + if mputil._logger: + mputil._logger.info( + 'after forker raised exception: %r', exc, exc_info=1) + + +def _ensure_after_fork(): + global _after_fork_registered + _after_fork_registered = True + register_after_fork(_global_after_fork, _global_after_fork) + + +class Celery(object): + #: This is deprecated, use :meth:`reduce_keys` instead + Pickler = AppPickler + + SYSTEM = platforms.SYSTEM + IS_OSX, IS_WINDOWS = platforms.IS_OSX, platforms.IS_WINDOWS + + amqp_cls = 'celery.app.amqp:AMQP' + backend_cls = None + events_cls = 'celery.events:Events' + loader_cls = 'celery.loaders.app:AppLoader' + log_cls = 'celery.app.log:Logging' + control_cls = 'celery.app.control:Control' + task_cls = 'celery.app.task:Task' + registry_cls = TaskRegistry + _fixups = None + _pool = None + builtin_fixups = BUILTIN_FIXUPS + + def __init__(self, main=None, loader=None, backend=None, + amqp=None, events=None, log=None, control=None, + set_as_current=True, accept_magic_kwargs=False, + tasks=None, broker=None, include=None, changes=None, + config_source=None, fixups=None, task_cls=None, + autofinalize=True, **kwargs): + self.clock = LamportClock() + self.main = main + self.amqp_cls = amqp or self.amqp_cls + self.events_cls = events or self.events_cls + self.loader_cls = loader or self.loader_cls + self.log_cls = log or self.log_cls + self.control_cls = control or self.control_cls + self.task_cls = task_cls or self.task_cls + self.set_as_current = set_as_current + self.registry_cls = symbol_by_name(self.registry_cls) + self.accept_magic_kwargs = accept_magic_kwargs + self.user_options = defaultdict(set) + self.steps = defaultdict(set) + self.autofinalize = autofinalize + + self.configured = False + self._config_source = config_source + self._pending_defaults = deque() + + self.finalized = False + self._finalize_mutex = threading.Lock() + self._pending = deque() + self._tasks = tasks + if not isinstance(self._tasks, TaskRegistry): + self._tasks = TaskRegistry(self._tasks or {}) + + # If the class defines a custom __reduce_args__ we need to use + # the old way of pickling apps, which is pickling a list of + # args instead of the new way that pickles a dict of keywords. + self._using_v1_reduce = app_has_custom(self, '__reduce_args__') + + # these options are moved to the config to + # simplify pickling of the app object. + self._preconf = changes or {} + if broker: + self._preconf['BROKER_URL'] = broker + if backend: + self._preconf['CELERY_RESULT_BACKEND'] = backend + if include: + self._preconf['CELERY_IMPORTS'] = include + + # - Apply fixups. + self.fixups = set(self.builtin_fixups) if fixups is None else fixups + # ...store fixup instances in _fixups to keep weakrefs alive. 
+ self._fixups = [symbol_by_name(fixup)(self) for fixup in self.fixups] + + if self.set_as_current: + self.set_current() + + self.on_init() + _register_app(self) + + def set_current(self): + _set_current_app(self) + + def set_default(self): + set_default_app(self) + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + + def close(self): + self._maybe_close_pool() + + def on_init(self): + """Optional callback called at init.""" + pass + + def start(self, argv=None): + return instantiate( + 'celery.bin.celery:CeleryCommand', + app=self).execute_from_commandline(argv) + + def worker_main(self, argv=None): + return instantiate( + 'celery.bin.worker:worker', + app=self).execute_from_commandline(argv) + + def task(self, *args, **opts): + """Creates new task class from any callable.""" + if _EXECV and not opts.get('_force_evaluate'): + # When using execv the task in the original module will point to a + # different app, so doing things like 'add.request' will point to + # a differnt task instance. This makes sure it will always use + # the task instance from the current app. + # Really need a better solution for this :( + from . import shared_task + return shared_task(*args, _force_evaluate=True, **opts) + + def inner_create_task_cls(shared=True, filter=None, **opts): + _filt = filter # stupid 2to3 + + def _create_task_cls(fun): + if shared: + def cons(app): + return app._task_from_fun(fun, **opts) + cons.__name__ = fun.__name__ + connect_on_app_finalize(cons) + if self.accept_magic_kwargs: # compat mode + task = self._task_from_fun(fun, **opts) + if filter: + task = filter(task) + return task + + if self.finalized or opts.get('_force_evaluate'): + ret = self._task_from_fun(fun, **opts) + else: + # return a proxy object that evaluates on first use + ret = PromiseProxy(self._task_from_fun, (fun, ), opts, + __doc__=fun.__doc__) + self._pending.append(ret) + if _filt: + return _filt(ret) + return ret + + return _create_task_cls + + if len(args) == 1: + if callable(args[0]): + return inner_create_task_cls(**opts)(*args) + raise TypeError('argument 1 to @task() must be a callable') + if args: + raise TypeError( + '@task() takes exactly 1 argument ({0} given)'.format( + sum([len(args), len(opts)]))) + return inner_create_task_cls(**opts) + + def _task_from_fun(self, fun, **options): + if not self.finalized and not self.autofinalize: + raise RuntimeError('Contract breach: app not finalized') + base = options.pop('base', None) or self.Task + bind = options.pop('bind', False) + + T = type(fun.__name__, (base, ), dict({ + 'app': self, + 'accept_magic_kwargs': False, + 'run': fun if bind else staticmethod(fun), + '_decorated': True, + '__doc__': fun.__doc__, + '__module__': fun.__module__, + '__wrapped__': fun}, **options))() + task = self._tasks[T.name] # return global instance. 
+ return task + + def finalize(self, auto=False): + with self._finalize_mutex: + if not self.finalized: + if auto and not self.autofinalize: + raise RuntimeError('Contract breach: app not finalized') + self.finalized = True + _announce_app_finalized(self) + + pending = self._pending + while pending: + maybe_evaluate(pending.popleft()) + + for task in values(self._tasks): + task.bind(self) + + def add_defaults(self, fun): + if not callable(fun): + d, fun = fun, lambda: d + if self.configured: + return self.conf.add_defaults(fun()) + self._pending_defaults.append(fun) + + def config_from_object(self, obj, silent=False, force=False): + self._config_source = obj + if force or self.configured: + del(self.conf) + return self.loader.config_from_object(obj, silent=silent) + + def config_from_envvar(self, variable_name, silent=False, force=False): + module_name = os.environ.get(variable_name) + if not module_name: + if silent: + return False + raise ImproperlyConfigured( + ERR_ENVVAR_NOT_SET.format(variable_name)) + return self.config_from_object(module_name, silent=silent, force=force) + + def config_from_cmdline(self, argv, namespace='celery'): + self.conf.update(self.loader.cmdline_config_parser(argv, namespace)) + + def setup_security(self, allowed_serializers=None, key=None, cert=None, + store=None, digest='sha1', serializer='json'): + from celery.security import setup_security + return setup_security(allowed_serializers, key, cert, + store, digest, serializer, app=self) + + def autodiscover_tasks(self, packages, related_name='tasks', force=False): + if force: + return self._autodiscover_tasks(packages, related_name) + signals.import_modules.connect(promise( + self._autodiscover_tasks, (packages, related_name), + ), weak=False, sender=self) + + def _autodiscover_tasks(self, packages, related_name='tasks', **kwargs): + # argument may be lazy + packages = packages() if callable(packages) else packages + self.loader.autodiscover_tasks(packages, related_name) + + def send_task(self, name, args=None, kwargs=None, countdown=None, + eta=None, task_id=None, producer=None, connection=None, + router=None, result_cls=None, expires=None, + publisher=None, link=None, link_error=None, + add_to_parent=True, reply_to=None, **options): + task_id = task_id or uuid() + producer = producer or publisher # XXX compat + router = router or self.amqp.router + conf = self.conf + if conf.CELERY_ALWAYS_EAGER: # pragma: no cover + warnings.warn(AlwaysEagerIgnored( + 'CELERY_ALWAYS_EAGER has no effect on send_task', + ), stacklevel=2) + options = router.route(options, name, args, kwargs) + if connection: + producer = self.amqp.TaskProducer(connection) + with self.producer_or_acquire(producer) as P: + self.backend.on_task_call(P, task_id) + task_id = P.publish_task( + name, args, kwargs, countdown=countdown, eta=eta, + task_id=task_id, expires=expires, + callbacks=maybe_list(link), errbacks=maybe_list(link_error), + reply_to=reply_to or self.oid, **options + ) + result = (result_cls or self.AsyncResult)(task_id) + if add_to_parent: + parent = get_current_worker_task() + if parent: + parent.add_trail(result) + return result + + def connection(self, hostname=None, userid=None, password=None, + virtual_host=None, port=None, ssl=None, + connect_timeout=None, transport=None, + transport_options=None, heartbeat=None, + login_method=None, failover_strategy=None, **kwargs): + conf = self.conf + return self.amqp.Connection( + hostname or conf.BROKER_URL, + userid or conf.BROKER_USER, + password or conf.BROKER_PASSWORD, + 
virtual_host or conf.BROKER_VHOST, + port or conf.BROKER_PORT, + transport=transport or conf.BROKER_TRANSPORT, + ssl=self.either('BROKER_USE_SSL', ssl), + heartbeat=heartbeat, + login_method=login_method or conf.BROKER_LOGIN_METHOD, + failover_strategy=( + failover_strategy or conf.BROKER_FAILOVER_STRATEGY + ), + transport_options=dict( + conf.BROKER_TRANSPORT_OPTIONS, **transport_options or {} + ), + connect_timeout=self.either( + 'BROKER_CONNECTION_TIMEOUT', connect_timeout + ), + ) + broker_connection = connection + + def _acquire_connection(self, pool=True): + """Helper for :meth:`connection_or_acquire`.""" + if pool: + return self.pool.acquire(block=True) + return self.connection() + + def connection_or_acquire(self, connection=None, pool=True, *_, **__): + return FallbackContext(connection, self._acquire_connection, pool=pool) + default_connection = connection_or_acquire # XXX compat + + def producer_or_acquire(self, producer=None): + return FallbackContext( + producer, self.amqp.producer_pool.acquire, block=True, + ) + default_producer = producer_or_acquire # XXX compat + + def prepare_config(self, c): + """Prepare configuration before it is merged with the defaults.""" + return find_deprecated_settings(c) + + def now(self): + return self.loader.now(utc=self.conf.CELERY_ENABLE_UTC) + + def mail_admins(self, subject, body, fail_silently=False): + if self.conf.ADMINS: + to = [admin_email for _, admin_email in self.conf.ADMINS] + return self.loader.mail_admins( + subject, body, fail_silently, to=to, + sender=self.conf.SERVER_EMAIL, + host=self.conf.EMAIL_HOST, + port=self.conf.EMAIL_PORT, + user=self.conf.EMAIL_HOST_USER, + password=self.conf.EMAIL_HOST_PASSWORD, + timeout=self.conf.EMAIL_TIMEOUT, + use_ssl=self.conf.EMAIL_USE_SSL, + use_tls=self.conf.EMAIL_USE_TLS, + ) + + def select_queues(self, queues=None): + return self.amqp.queues.select(queues) + + def either(self, default_key, *values): + """Fallback to the value of a configuration key if none of the + `*values` are true.""" + return first(None, values) or self.conf.get(default_key) + + def bugreport(self): + return bugreport(self) + + def _get_backend(self): + from celery.backends import get_backend_by_url + backend, url = get_backend_by_url( + self.backend_cls or self.conf.CELERY_RESULT_BACKEND, + self.loader) + return backend(app=self, url=url) + + def on_configure(self): + """Callback calld when the app loads configuration""" + pass + + def _get_config(self): + self.on_configure() + if self._config_source: + self.loader.config_from_object(self._config_source) + self.configured = True + s = Settings({}, [self.prepare_config(self.loader.conf), + deepcopy(DEFAULTS)]) + # load lazy config dict initializers. + pending = self._pending_defaults + while pending: + s.add_defaults(maybe_evaluate(pending.popleft()())) + + # preconf options must be explicitly set in the conf, and not + # as defaults or they will not be pickled with the app instance. + # This will cause errors when `CELERYD_FORCE_EXECV=True` as + # the workers will not have a BROKER_URL, CELERY_RESULT_BACKEND, + # or CELERY_IMPORTS set in the config. 
+ if self._preconf: + s.update(self._preconf) + return s + + def _after_fork(self, obj_): + self._maybe_close_pool() + + def _maybe_close_pool(self): + pool, self._pool = self._pool, None + if pool is not None: + pool.force_close_all() + amqp = self.__dict__.get('amqp') + if amqp is not None: + producer_pool, amqp._producer_pool = amqp._producer_pool, None + if producer_pool is not None: + producer_pool.force_close_all() + + def signature(self, *args, **kwargs): + kwargs['app'] = self + return self.canvas.signature(*args, **kwargs) + + def create_task_cls(self): + """Creates a base task class using default configuration + taken from this app.""" + return self.subclass_with_self( + self.task_cls, name='Task', attribute='_app', + keep_reduce=True, abstract=True, + ) + + def subclass_with_self(self, Class, name=None, attribute='app', + reverse=None, keep_reduce=False, **kw): + """Subclass an app-compatible class by setting its app attribute + to be this app instance. + + App-compatible means that the class has a class attribute that + provides the default app it should use, e.g. + ``class Foo: app = None``. + + :param Class: The app-compatible class to subclass. + :keyword name: Custom name for the target class. + :keyword attribute: Name of the attribute holding the app, + default is 'app'. + + """ + Class = symbol_by_name(Class) + reverse = reverse if reverse else Class.__name__ + + def __reduce__(self): + return _unpickle_appattr, (reverse, self.__reduce_args__()) + + attrs = dict({attribute: self}, __module__=Class.__module__, + __doc__=Class.__doc__, **kw) + if not keep_reduce: + attrs['__reduce__'] = __reduce__ + + return type(name or Class.__name__, (Class, ), attrs) + + def _rgetattr(self, path): + return attrgetter(path)(self) + + def __repr__(self): + return '<{0} {1}>'.format(type(self).__name__, appstr(self)) + + def __reduce__(self): + if self._using_v1_reduce: + return self.__reduce_v1__() + return (_unpickle_app_v2, (self.__class__, self.__reduce_keys__())) + + def __reduce_v1__(self): + # Reduce only pickles the configuration changes, + # so the default configuration doesn't have to be passed + # between processes. 
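# A small sketch of what subclass_with_self() (defined above) produces,
# using the real 'celery.result:AsyncResult' path; the task id is
# illustrative and no broker is contacted here.
from celery import Celery

app = Celery('example')
AppResult = app.subclass_with_self('celery.result:AsyncResult')
assert AppResult.app is app                 # the app attribute is pre-bound
res = AppResult('7f1a2b3c-example-id')      # no app argument required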
+ return ( + _unpickle_app, + (self.__class__, self.Pickler) + self.__reduce_args__(), + ) + + def __reduce_keys__(self): + """Return keyword arguments used to reconstruct the object + when unpickling.""" + return { + 'main': self.main, + 'changes': self.conf.changes if self.configured else self._preconf, + 'loader': self.loader_cls, + 'backend': self.backend_cls, + 'amqp': self.amqp_cls, + 'events': self.events_cls, + 'log': self.log_cls, + 'control': self.control_cls, + 'accept_magic_kwargs': self.accept_magic_kwargs, + 'fixups': self.fixups, + 'config_source': self._config_source, + 'task_cls': self.task_cls, + } + + def __reduce_args__(self): + """Deprecated method, please use :meth:`__reduce_keys__` instead.""" + return (self.main, self.conf.changes, + self.loader_cls, self.backend_cls, self.amqp_cls, + self.events_cls, self.log_cls, self.control_cls, + self.accept_magic_kwargs, self._config_source) + + @cached_property + def Worker(self): + return self.subclass_with_self('celery.apps.worker:Worker') + + @cached_property + def WorkController(self, **kwargs): + return self.subclass_with_self('celery.worker:WorkController') + + @cached_property + def Beat(self, **kwargs): + return self.subclass_with_self('celery.apps.beat:Beat') + + @cached_property + def Task(self): + return self.create_task_cls() + + @cached_property + def annotations(self): + return prepare_annotations(self.conf.CELERY_ANNOTATIONS) + + @cached_property + def AsyncResult(self): + return self.subclass_with_self('celery.result:AsyncResult') + + @cached_property + def ResultSet(self): + return self.subclass_with_self('celery.result:ResultSet') + + @cached_property + def GroupResult(self): + return self.subclass_with_self('celery.result:GroupResult') + + @cached_property + def TaskSet(self): # XXX compat + """Deprecated! Please use :class:`celery.group` instead.""" + return self.subclass_with_self('celery.task.sets:TaskSet') + + @cached_property + def TaskSetResult(self): # XXX compat + """Deprecated! 
Please use :attr:`GroupResult` instead.""" + return self.subclass_with_self('celery.result:TaskSetResult') + + @property + def pool(self): + if self._pool is None: + _ensure_after_fork() + limit = self.conf.BROKER_POOL_LIMIT + self._pool = self.connection().Pool(limit=limit) + return self._pool + + @property + def current_task(self): + return _task_stack.top + + @cached_property + def oid(self): + return oid_from(self) + + @cached_property + def amqp(self): + return instantiate(self.amqp_cls, app=self) + + @cached_property + def backend(self): + return self._get_backend() + + @cached_property + def conf(self): + return self._get_config() + + @cached_property + def control(self): + return instantiate(self.control_cls, app=self) + + @cached_property + def events(self): + return instantiate(self.events_cls, app=self) + + @cached_property + def loader(self): + return get_loader_cls(self.loader_cls)(app=self) + + @cached_property + def log(self): + return instantiate(self.log_cls, app=self) + + @cached_property + def canvas(self): + from celery import canvas + return canvas + + @cached_property + def tasks(self): + self.finalize(auto=True) + return self._tasks + + @cached_property + def timezone(self): + from celery.utils.timeutils import timezone + conf = self.conf + tz = conf.CELERY_TIMEZONE + if not tz: + return (timezone.get_timezone('UTC') if conf.CELERY_ENABLE_UTC + else timezone.local) + return timezone.get_timezone(self.conf.CELERY_TIMEZONE) +App = Celery # compat diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/builtins.py b/thesisenv/lib/python3.6/site-packages/celery/app/builtins.py new file mode 100644 index 0000000..1502768 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/app/builtins.py @@ -0,0 +1,379 @@ +# -*- coding: utf-8 -*- +""" + celery.app.builtins + ~~~~~~~~~~~~~~~~~~~ + + Built-in tasks that are always available in all + app instances. E.g. chord, group and xmap. + +""" +from __future__ import absolute_import + +from collections import deque + +from celery._state import get_current_worker_task, connect_on_app_finalize +from celery.utils import uuid +from celery.utils.log import get_logger + +__all__ = [] + +logger = get_logger(__name__) + + +@connect_on_app_finalize +def add_backend_cleanup_task(app): + """The backend cleanup task can be used to clean up the default result + backend. + + If the configured backend requires periodic cleanup this task is also + automatically configured to run every day at 4am (requires + :program:`celery beat` to be running). + + """ + @app.task(name='celery.backend_cleanup', + shared=False, _force_evaluate=True) + def backend_cleanup(): + app.backend.cleanup() + return backend_cleanup + + +@connect_on_app_finalize +def add_unlock_chord_task(app): + """This task is used by result backends without native chord support. + + It joins chords by creating a task chain polling the header for completion. 
+ + """ + from celery.canvas import signature + from celery.exceptions import ChordError + from celery.result import allow_join_result, result_from_tuple + + default_propagate = app.conf.CELERY_CHORD_PROPAGATES + + @app.task(name='celery.chord_unlock', max_retries=None, shared=False, + default_retry_delay=1, ignore_result=True, _force_evaluate=True, + bind=True) + def unlock_chord(self, group_id, callback, interval=None, propagate=None, + max_retries=None, result=None, + Result=app.AsyncResult, GroupResult=app.GroupResult, + result_from_tuple=result_from_tuple): + # if propagate is disabled exceptions raised by chord tasks + # will be sent as part of the result list to the chord callback. + # Since 3.1 propagate will be enabled by default, and instead + # the chord callback changes state to FAILURE with the + # exception set to ChordError. + propagate = default_propagate if propagate is None else propagate + if interval is None: + interval = self.default_retry_delay + + # check if the task group is ready, and if so apply the callback. + deps = GroupResult( + group_id, + [result_from_tuple(r, app=app) for r in result], + app=app, + ) + j = deps.join_native if deps.supports_native_join else deps.join + + try: + ready = deps.ready() + except Exception as exc: + raise self.retry( + exc=exc, countdown=interval, max_retries=max_retries, + ) + else: + if not ready: + raise self.retry(countdown=interval, max_retries=max_retries) + + callback = signature(callback, app=app) + try: + with allow_join_result(): + ret = j(timeout=3.0, propagate=propagate) + except Exception as exc: + try: + culprit = next(deps._failed_join_report()) + reason = 'Dependency {0.id} raised {1!r}'.format( + culprit, exc, + ) + except StopIteration: + reason = repr(exc) + logger.error('Chord %r raised: %r', group_id, exc, exc_info=1) + app.backend.chord_error_from_stack(callback, + ChordError(reason)) + else: + try: + callback.delay(ret) + except Exception as exc: + logger.error('Chord %r raised: %r', group_id, exc, exc_info=1) + app.backend.chord_error_from_stack( + callback, + exc=ChordError('Callback error: {0!r}'.format(exc)), + ) + return unlock_chord + + +@connect_on_app_finalize +def add_map_task(app): + from celery.canvas import signature + + @app.task(name='celery.map', shared=False, _force_evaluate=True) + def xmap(task, it): + task = signature(task, app=app).type + return [task(item) for item in it] + return xmap + + +@connect_on_app_finalize +def add_starmap_task(app): + from celery.canvas import signature + + @app.task(name='celery.starmap', shared=False, _force_evaluate=True) + def xstarmap(task, it): + task = signature(task, app=app).type + return [task(*item) for item in it] + return xstarmap + + +@connect_on_app_finalize +def add_chunk_task(app): + from celery.canvas import chunks as _chunks + + @app.task(name='celery.chunks', shared=False, _force_evaluate=True) + def chunks(task, it, n): + return _chunks.apply_chunks(task, it, n) + return chunks + + +@connect_on_app_finalize +def add_group_task(app): + _app = app + from celery.canvas import maybe_signature, signature + from celery.result import result_from_tuple + + class Group(app.Task): + app = _app + name = 'celery.group' + accept_magic_kwargs = False + _decorated = True + + def run(self, tasks, result, group_id, partial_args, + add_to_parent=True): + app = self.app + result = result_from_tuple(result, app) + # any partial args are added to all tasks in the group + taskit = (signature(task, app=app).clone(partial_args) + for i, task in enumerate(tasks)) + 
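# Public-API counterpart of the celery.group task built here -- a hedged
# sketch assuming a running worker, a configured result backend, and an
# illustrative ``tasks`` module providing an ``add`` task.
from celery import group
from tasks import add

job = group(add.s(i, i) for i in range(4))   # header of independent tasks
result = job.apply_async()                   # dispatched via celery.group
print(result.get(timeout=10))                # -> [0, 2, 4, 6]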
if self.request.is_eager or app.conf.CELERY_ALWAYS_EAGER: + return app.GroupResult( + result.id, + [stask.apply(group_id=group_id) for stask in taskit], + ) + with app.producer_or_acquire() as pub: + [stask.apply_async(group_id=group_id, producer=pub, + add_to_parent=False) for stask in taskit] + parent = get_current_worker_task() + if add_to_parent and parent: + parent.add_trail(result) + return result + + def prepare(self, options, tasks, args, **kwargs): + options['group_id'] = group_id = ( + options.setdefault('task_id', uuid())) + + def prepare_member(task): + task = maybe_signature(task, app=self.app) + task.options['group_id'] = group_id + return task, task.freeze() + + try: + tasks, res = list(zip( + *[prepare_member(task) for task in tasks] + )) + except ValueError: # tasks empty + tasks, res = [], [] + return (tasks, self.app.GroupResult(group_id, res), group_id, args) + + def apply_async(self, partial_args=(), kwargs={}, **options): + if self.app.conf.CELERY_ALWAYS_EAGER: + return self.apply(partial_args, kwargs, **options) + tasks, result, gid, args = self.prepare( + options, args=partial_args, **kwargs + ) + super(Group, self).apply_async(( + list(tasks), result.as_tuple(), gid, args), **options + ) + return result + + def apply(self, args=(), kwargs={}, **options): + return super(Group, self).apply( + self.prepare(options, args=args, **kwargs), + **options).get() + return Group + + +@connect_on_app_finalize +def add_chain_task(app): + from celery.canvas import ( + Signature, chain, chord, group, maybe_signature, maybe_unroll_group, + ) + + _app = app + + class Chain(app.Task): + app = _app + name = 'celery.chain' + accept_magic_kwargs = False + _decorated = True + + def prepare_steps(self, args, tasks): + app = self.app + steps = deque(tasks) + next_step = prev_task = prev_res = None + tasks, results = [], [] + i = 0 + while steps: + # First task get partial args from chain. + task = maybe_signature(steps.popleft(), app=app) + task = task.clone() if i else task.clone(args) + res = task.freeze() + i += 1 + + if isinstance(task, group): + task = maybe_unroll_group(task) + if isinstance(task, chain): + # splice the chain + steps.extendleft(reversed(task.tasks)) + continue + + elif isinstance(task, group) and steps and \ + not isinstance(steps[0], group): + # automatically upgrade group(..) | s to chord(group, s) + try: + next_step = steps.popleft() + # for chords we freeze by pretending it's a normal + # task instead of a group. + res = Signature.freeze(next_step) + task = chord(task, body=next_step, task_id=res.task_id) + except IndexError: + pass # no callback, so keep as group + if prev_task: + # link previous task to this task. + prev_task.link(task) + # set the results parent attribute. + if not res.parent: + res.parent = prev_res + + if not isinstance(prev_task, chord): + results.append(res) + tasks.append(task) + prev_task, prev_res = task, res + + return tasks, results + + def apply_async(self, args=(), kwargs={}, group_id=None, chord=None, + task_id=None, link=None, link_error=None, **options): + if self.app.conf.CELERY_ALWAYS_EAGER: + return self.apply(args, kwargs, **options) + options.pop('publisher', None) + tasks, results = self.prepare_steps(args, kwargs['tasks']) + result = results[-1] + if group_id: + tasks[-1].set(group_id=group_id) + if chord: + tasks[-1].set(chord=chord) + if task_id: + tasks[-1].set(task_id=task_id) + result = tasks[-1].type.AsyncResult(task_id) + # make sure we can do a link() and link_error() on a chain object. 
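# A hedged sketch of the chain behaviour prepared above; ``add`` and
# ``log_error`` live in an illustrative ``tasks`` module, and a worker
# plus result backend are assumed to be available.
from celery import chain
from tasks import add, log_error

# add(2, 2) -> add(4, 4) -> add(8, 8): each link receives the previous
# result as its leading (partial) argument, as prepare_steps() splices it.
flow = chain(add.s(2, 2), add.s(4), add.s(8))
result = flow.apply_async(link_error=log_error.s())  # errback set on every step
print(result.get())                                   # -> 16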
+ if link: + tasks[-1].set(link=link) + # and if any task in the chain fails, call the errbacks + if link_error: + for task in tasks: + task.set(link_error=link_error) + tasks[0].apply_async(**options) + return result + + def apply(self, args=(), kwargs={}, signature=maybe_signature, + **options): + app = self.app + last, fargs = None, args # fargs passed to first task only + for task in kwargs['tasks']: + res = signature(task, app=app).clone(fargs).apply( + last and (last.get(), ), + ) + res.parent, last, fargs = last, res, None + return last + return Chain + + +@connect_on_app_finalize +def add_chord_task(app): + """Every chord is executed in a dedicated task, so that the chord + can be used as a signature, and this generates the task + responsible for that.""" + from celery import group + from celery.canvas import maybe_signature + _app = app + default_propagate = app.conf.CELERY_CHORD_PROPAGATES + + class Chord(app.Task): + app = _app + name = 'celery.chord' + accept_magic_kwargs = False + ignore_result = False + _decorated = True + + def run(self, header, body, partial_args=(), interval=None, + countdown=1, max_retries=None, propagate=None, + eager=False, **kwargs): + app = self.app + propagate = default_propagate if propagate is None else propagate + group_id = uuid() + + # - convert back to group if serialized + tasks = header.tasks if isinstance(header, group) else header + header = group([ + maybe_signature(s, app=app).clone() for s in tasks + ], app=self.app) + # - eager applies the group inline + if eager: + return header.apply(args=partial_args, task_id=group_id) + + body['chord_size'] = len(header.tasks) + results = header.freeze(group_id=group_id, chord=body).results + + return self.backend.apply_chord( + header, partial_args, group_id, + body, interval=interval, countdown=countdown, + max_retries=max_retries, propagate=propagate, result=results, + ) + + def apply_async(self, args=(), kwargs={}, task_id=None, + group_id=None, chord=None, **options): + app = self.app + if app.conf.CELERY_ALWAYS_EAGER: + return self.apply(args, kwargs, **options) + header = kwargs.pop('header') + body = kwargs.pop('body') + header, body = (maybe_signature(header, app=app), + maybe_signature(body, app=app)) + # forward certain options to body + if chord is not None: + body.options['chord'] = chord + if group_id is not None: + body.options['group_id'] = group_id + [body.link(s) for s in options.pop('link', [])] + [body.link_error(s) for s in options.pop('link_error', [])] + body_result = body.freeze(task_id) + parent = super(Chord, self).apply_async((header, body, args), + kwargs, **options) + body_result.parent = parent + return body_result + + def apply(self, args=(), kwargs={}, propagate=True, **options): + body = kwargs['body'] + res = super(Chord, self).apply(args, dict(kwargs, eager=True), + **options) + return maybe_signature(body, app=self.app).apply( + args=(res.get(propagate=propagate).get(), )) + return Chord diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/control.py b/thesisenv/lib/python3.6/site-packages/celery/app/control.py new file mode 100644 index 0000000..7258dd6 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/app/control.py @@ -0,0 +1,317 @@ +# -*- coding: utf-8 -*- +""" + celery.app.control + ~~~~~~~~~~~~~~~~~~~ + + Client for worker remote control commands. + Server implementation is in :mod:`celery.worker.control`. 
+ +""" +from __future__ import absolute_import + +import warnings + +from kombu.pidbox import Mailbox +from kombu.utils import cached_property + +from celery.exceptions import DuplicateNodenameWarning +from celery.utils.text import pluralize + +__all__ = ['Inspect', 'Control', 'flatten_reply'] + +W_DUPNODE = """\ +Received multiple replies from node {0}: {1}. +Please make sure you give each node a unique nodename using the `-n` option.\ +""" + + +def flatten_reply(reply): + nodes, dupes = {}, set() + for item in reply: + [dupes.add(name) for name in item if name in nodes] + nodes.update(item) + if dupes: + warnings.warn(DuplicateNodenameWarning( + W_DUPNODE.format( + pluralize(len(dupes), 'name'), ', '.join(sorted(dupes)), + ), + )) + return nodes + + +class Inspect(object): + app = None + + def __init__(self, destination=None, timeout=1, callback=None, + connection=None, app=None, limit=None): + self.app = app or self.app + self.destination = destination + self.timeout = timeout + self.callback = callback + self.connection = connection + self.limit = limit + + def _prepare(self, reply): + if not reply: + return + by_node = flatten_reply(reply) + if self.destination and \ + not isinstance(self.destination, (list, tuple)): + return by_node.get(self.destination) + return by_node + + def _request(self, command, **kwargs): + return self._prepare(self.app.control.broadcast( + command, + arguments=kwargs, + destination=self.destination, + callback=self.callback, + connection=self.connection, + limit=self.limit, + timeout=self.timeout, reply=True, + )) + + def report(self): + return self._request('report') + + def clock(self): + return self._request('clock') + + def active(self, safe=False): + return self._request('dump_active', safe=safe) + + def scheduled(self, safe=False): + return self._request('dump_schedule', safe=safe) + + def reserved(self, safe=False): + return self._request('dump_reserved', safe=safe) + + def stats(self): + return self._request('stats') + + def revoked(self): + return self._request('dump_revoked') + + def registered(self, *taskinfoitems): + return self._request('dump_tasks', taskinfoitems=taskinfoitems) + registered_tasks = registered + + def ping(self): + return self._request('ping') + + def active_queues(self): + return self._request('active_queues') + + def query_task(self, ids): + return self._request('query_task', ids=ids) + + def conf(self, with_defaults=False): + return self._request('dump_conf', with_defaults=with_defaults) + + def hello(self, from_node, revoked=None): + return self._request('hello', from_node=from_node, revoked=revoked) + + def memsample(self): + return self._request('memsample') + + def memdump(self, samples=10): + return self._request('memdump', samples=samples) + + def objgraph(self, type='Request', n=200, max_depth=10): + return self._request('objgraph', num=n, max_depth=max_depth, type=type) + + +class Control(object): + Mailbox = Mailbox + + def __init__(self, app=None): + self.app = app + self.mailbox = self.Mailbox('celery', type='fanout', accept=['json']) + + @cached_property + def inspect(self): + return self.app.subclass_with_self(Inspect, reverse='control.inspect') + + def purge(self, connection=None): + """Discard all waiting tasks. + + This will ignore all tasks waiting for execution, and they will + be deleted from the messaging server. + + :returns: the number of tasks discarded. 
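# A hedged sketch of the client side of these remote control commands;
# the broker URL and the task id are illustrative only.
from celery import Celery

app = Celery('example', broker='amqp://guest@localhost//')

app.control.purge()                                      # drop all waiting messages
app.control.revoke('d9078da5-example-task-id', terminate=True)
app.control.rate_limit('application.tasks.send_mail', '10/m')
replies = app.control.ping(timeout=0.5)                  # e.g. [{'worker1@host': {'ok': 'pong'}}]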
+ + """ + with self.app.connection_or_acquire(connection) as conn: + return self.app.amqp.TaskConsumer(conn).purge() + discard_all = purge + + def election(self, id, topic, action=None, connection=None): + self.broadcast('election', connection=connection, arguments={ + 'id': id, 'topic': topic, 'action': action, + }) + + def revoke(self, task_id, destination=None, terminate=False, + signal='SIGTERM', **kwargs): + """Tell all (or specific) workers to revoke a task by id. + + If a task is revoked, the workers will ignore the task and + not execute it after all. + + :param task_id: Id of the task to revoke. + :keyword terminate: Also terminate the process currently working + on the task (if any). + :keyword signal: Name of signal to send to process if terminate. + Default is TERM. + + See :meth:`broadcast` for supported keyword arguments. + + """ + return self.broadcast('revoke', destination=destination, + arguments={'task_id': task_id, + 'terminate': terminate, + 'signal': signal}, **kwargs) + + def ping(self, destination=None, timeout=1, **kwargs): + """Ping all (or specific) workers. + + Will return the list of answers. + + See :meth:`broadcast` for supported keyword arguments. + + """ + return self.broadcast('ping', reply=True, destination=destination, + timeout=timeout, **kwargs) + + def rate_limit(self, task_name, rate_limit, destination=None, **kwargs): + """Tell all (or specific) workers to set a new rate limit + for task by type. + + :param task_name: Name of task to change rate limit for. + :param rate_limit: The rate limit as tasks per second, or a rate limit + string (`'100/m'`, etc. + see :attr:`celery.task.base.Task.rate_limit` for + more information). + + See :meth:`broadcast` for supported keyword arguments. + + """ + return self.broadcast('rate_limit', destination=destination, + arguments={'task_name': task_name, + 'rate_limit': rate_limit}, + **kwargs) + + def add_consumer(self, queue, exchange=None, exchange_type='direct', + routing_key=None, options=None, **kwargs): + """Tell all (or specific) workers to start consuming from a new queue. + + Only the queue name is required as if only the queue is specified + then the exchange/routing key will be set to the same name ( + like automatic queues do). + + .. note:: + + This command does not respect the default queue/exchange + options in the configuration. + + :param queue: Name of queue to start consuming from. + :keyword exchange: Optional name of exchange. + :keyword exchange_type: Type of exchange (defaults to 'direct') + command to, when empty broadcast to all workers. + :keyword routing_key: Optional routing key. + :keyword options: Additional options as supported + by :meth:`kombu.entitiy.Queue.from_dict`. + + See :meth:`broadcast` for supported keyword arguments. + + """ + return self.broadcast( + 'add_consumer', + arguments=dict({'queue': queue, 'exchange': exchange, + 'exchange_type': exchange_type, + 'routing_key': routing_key}, **options or {}), + **kwargs + ) + + def cancel_consumer(self, queue, **kwargs): + """Tell all (or specific) workers to stop consuming from ``queue``. + + Supports the same keyword arguments as :meth:`broadcast`. + + """ + return self.broadcast( + 'cancel_consumer', arguments={'queue': queue}, **kwargs + ) + + def time_limit(self, task_name, soft=None, hard=None, **kwargs): + """Tell all (or specific) workers to set time limits for + a task by type. + + :param task_name: Name of task to change time limits for. + :keyword soft: New soft time limit (in seconds). 
+ :keyword hard: New hard time limit (in seconds). + + Any additional keyword arguments are passed on to :meth:`broadcast`. + + """ + return self.broadcast( + 'time_limit', + arguments={'task_name': task_name, + 'hard': hard, 'soft': soft}, **kwargs) + + def enable_events(self, destination=None, **kwargs): + """Tell all (or specific) workers to enable events.""" + return self.broadcast('enable_events', {}, destination, **kwargs) + + def disable_events(self, destination=None, **kwargs): + """Tell all (or specific) workers to disable events.""" + return self.broadcast('disable_events', {}, destination, **kwargs) + + def pool_grow(self, n=1, destination=None, **kwargs): + """Tell all (or specific) workers to grow the pool by ``n``. + + Supports the same arguments as :meth:`broadcast`. + + """ + return self.broadcast('pool_grow', {'n': n}, destination, **kwargs) + + def pool_shrink(self, n=1, destination=None, **kwargs): + """Tell all (or specific) workers to shrink the pool by ``n``. + + Supports the same arguments as :meth:`broadcast`. + + """ + return self.broadcast('pool_shrink', {'n': n}, destination, **kwargs) + + def autoscale(self, max, min, destination=None, **kwargs): + """Change worker(s) autoscale setting. + + Supports the same arguments as :meth:`broadcast`. + + """ + return self.broadcast( + 'autoscale', {'max': max, 'min': min}, destination, **kwargs) + + def broadcast(self, command, arguments=None, destination=None, + connection=None, reply=False, timeout=1, limit=None, + callback=None, channel=None, **extra_kwargs): + """Broadcast a control command to the celery workers. + + :param command: Name of command to send. + :param arguments: Keyword arguments for the command. + :keyword destination: If set, a list of the hosts to send the + command to, when empty broadcast to all workers. + :keyword connection: Custom broker connection to use, if not set, + a connection will be established automatically. + :keyword reply: Wait for and return the reply. + :keyword timeout: Timeout in seconds to wait for the reply. + :keyword limit: Limit number of replies. + :keyword callback: Callback called immediately for each reply + received. + + """ + with self.app.connection_or_acquire(connection) as conn: + arguments = dict(arguments or {}, **extra_kwargs) + return self.mailbox(conn)._broadcast( + command, arguments, destination, reply, timeout, + limit, callback, channel=channel, + ) diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/defaults.py b/thesisenv/lib/python3.6/site-packages/celery/app/defaults.py new file mode 100644 index 0000000..aa7dd45 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/app/defaults.py @@ -0,0 +1,274 @@ +# -*- coding: utf-8 -*- +""" + celery.app.defaults + ~~~~~~~~~~~~~~~~~~~ + + Configuration introspection and defaults. 
+ +""" +from __future__ import absolute_import + +import sys + +from collections import deque, namedtuple +from datetime import timedelta + +from celery.five import items +from celery.utils import strtobool +from celery.utils.functional import memoize + +__all__ = ['Option', 'NAMESPACES', 'flatten', 'find'] + +is_jython = sys.platform.startswith('java') +is_pypy = hasattr(sys, 'pypy_version_info') + +DEFAULT_POOL = 'prefork' +if is_jython: + DEFAULT_POOL = 'threads' +elif is_pypy: + if sys.pypy_version_info[0:3] < (1, 5, 0): + DEFAULT_POOL = 'solo' + else: + DEFAULT_POOL = 'prefork' + +DEFAULT_ACCEPT_CONTENT = ['json', 'pickle', 'msgpack', 'yaml'] +DEFAULT_PROCESS_LOG_FMT = """ + [%(asctime)s: %(levelname)s/%(processName)s] %(message)s +""".strip() +DEFAULT_LOG_FMT = '[%(asctime)s: %(levelname)s] %(message)s' +DEFAULT_TASK_LOG_FMT = """[%(asctime)s: %(levelname)s/%(processName)s] \ +%(task_name)s[%(task_id)s]: %(message)s""" + +_BROKER_OLD = {'deprecate_by': '2.5', 'remove_by': '4.0', + 'alt': 'BROKER_URL setting'} +_REDIS_OLD = {'deprecate_by': '2.5', 'remove_by': '4.0', + 'alt': 'URL form of CELERY_RESULT_BACKEND'} + +searchresult = namedtuple('searchresult', ('namespace', 'key', 'type')) + + +# logging: processName first introduced in Py 2.6.2 (Issue #1644). +if sys.version_info < (2, 6, 2): + DEFAULT_PROCESS_LOG_FMT = DEFAULT_LOG_FMT + + +class Option(object): + alt = None + deprecate_by = None + remove_by = None + typemap = dict(string=str, int=int, float=float, any=lambda v: v, + bool=strtobool, dict=dict, tuple=tuple) + + def __init__(self, default=None, *args, **kwargs): + self.default = default + self.type = kwargs.get('type') or 'string' + for attr, value in items(kwargs): + setattr(self, attr, value) + + def to_python(self, value): + return self.typemap[self.type](value) + + def __repr__(self): + return '{0} default->{1!r}>'.format(self.type, + self.default) + +NAMESPACES = { + 'BROKER': { + 'URL': Option(None, type='string'), + 'CONNECTION_TIMEOUT': Option(4, type='float'), + 'CONNECTION_RETRY': Option(True, type='bool'), + 'CONNECTION_MAX_RETRIES': Option(100, type='int'), + 'FAILOVER_STRATEGY': Option(None, type='string'), + 'HEARTBEAT': Option(None, type='int'), + 'HEARTBEAT_CHECKRATE': Option(3.0, type='int'), + 'LOGIN_METHOD': Option(None, type='string'), + 'POOL_LIMIT': Option(10, type='int'), + 'USE_SSL': Option(False, type='bool'), + 'TRANSPORT': Option(type='string'), + 'TRANSPORT_OPTIONS': Option({}, type='dict'), + 'HOST': Option(type='string', **_BROKER_OLD), + 'PORT': Option(type='int', **_BROKER_OLD), + 'USER': Option(type='string', **_BROKER_OLD), + 'PASSWORD': Option(type='string', **_BROKER_OLD), + 'VHOST': Option(type='string', **_BROKER_OLD), + }, + 'CASSANDRA': { + 'COLUMN_FAMILY': Option(type='string'), + 'DETAILED_MODE': Option(False, type='bool'), + 'KEYSPACE': Option(type='string'), + 'READ_CONSISTENCY': Option(type='string'), + 'SERVERS': Option(type='list'), + 'WRITE_CONSISTENCY': Option(type='string'), + }, + 'CELERY': { + 'ACCEPT_CONTENT': Option(DEFAULT_ACCEPT_CONTENT, type='list'), + 'ACKS_LATE': Option(False, type='bool'), + 'ALWAYS_EAGER': Option(False, type='bool'), + 'ANNOTATIONS': Option(type='any'), + 'BROADCAST_QUEUE': Option('celeryctl'), + 'BROADCAST_EXCHANGE': Option('celeryctl'), + 'BROADCAST_EXCHANGE_TYPE': Option('fanout'), + 'CACHE_BACKEND': Option(), + 'CACHE_BACKEND_OPTIONS': Option({}, type='dict'), + 'CHORD_PROPAGATES': Option(True, type='bool'), + 'COUCHBASE_BACKEND_SETTINGS': Option(None, type='dict'), + 'CREATE_MISSING_QUEUES': 
Option(True, type='bool'), + 'DEFAULT_RATE_LIMIT': Option(type='string'), + 'DISABLE_RATE_LIMITS': Option(False, type='bool'), + 'DEFAULT_ROUTING_KEY': Option('celery'), + 'DEFAULT_QUEUE': Option('celery'), + 'DEFAULT_EXCHANGE': Option('celery'), + 'DEFAULT_EXCHANGE_TYPE': Option('direct'), + 'DEFAULT_DELIVERY_MODE': Option(2, type='string'), + 'EAGER_PROPAGATES_EXCEPTIONS': Option(False, type='bool'), + 'ENABLE_UTC': Option(True, type='bool'), + 'ENABLE_REMOTE_CONTROL': Option(True, type='bool'), + 'EVENT_SERIALIZER': Option('json'), + 'EVENT_QUEUE_EXPIRES': Option(None, type='float'), + 'EVENT_QUEUE_TTL': Option(None, type='float'), + 'IMPORTS': Option((), type='tuple'), + 'INCLUDE': Option((), type='tuple'), + 'IGNORE_RESULT': Option(False, type='bool'), + 'MAX_CACHED_RESULTS': Option(100, type='int'), + 'MESSAGE_COMPRESSION': Option(type='string'), + 'MONGODB_BACKEND_SETTINGS': Option(type='dict'), + 'REDIS_HOST': Option(type='string', **_REDIS_OLD), + 'REDIS_PORT': Option(type='int', **_REDIS_OLD), + 'REDIS_DB': Option(type='int', **_REDIS_OLD), + 'REDIS_PASSWORD': Option(type='string', **_REDIS_OLD), + 'REDIS_MAX_CONNECTIONS': Option(type='int'), + 'RESULT_BACKEND': Option(type='string'), + 'RESULT_DB_SHORT_LIVED_SESSIONS': Option(False, type='bool'), + 'RESULT_DB_TABLENAMES': Option(type='dict'), + 'RESULT_DBURI': Option(), + 'RESULT_ENGINE_OPTIONS': Option(type='dict'), + 'RESULT_EXCHANGE': Option('celeryresults'), + 'RESULT_EXCHANGE_TYPE': Option('direct'), + 'RESULT_SERIALIZER': Option('pickle'), + 'RESULT_PERSISTENT': Option(None, type='bool'), + 'ROUTES': Option(type='any'), + 'SEND_EVENTS': Option(False, type='bool'), + 'SEND_TASK_ERROR_EMAILS': Option(False, type='bool'), + 'SEND_TASK_SENT_EVENT': Option(False, type='bool'), + 'STORE_ERRORS_EVEN_IF_IGNORED': Option(False, type='bool'), + 'TASK_PUBLISH_RETRY': Option(True, type='bool'), + 'TASK_PUBLISH_RETRY_POLICY': Option({ + 'max_retries': 3, + 'interval_start': 0, + 'interval_max': 1, + 'interval_step': 0.2}, type='dict'), + 'TASK_RESULT_EXPIRES': Option(timedelta(days=1), type='float'), + 'TASK_SERIALIZER': Option('pickle'), + 'TIMEZONE': Option(type='string'), + 'TRACK_STARTED': Option(False, type='bool'), + 'REDIRECT_STDOUTS': Option(True, type='bool'), + 'REDIRECT_STDOUTS_LEVEL': Option('WARNING'), + 'QUEUES': Option(type='dict'), + 'QUEUE_HA_POLICY': Option(None, type='string'), + 'SECURITY_KEY': Option(type='string'), + 'SECURITY_CERTIFICATE': Option(type='string'), + 'SECURITY_CERT_STORE': Option(type='string'), + 'WORKER_DIRECT': Option(False, type='bool'), + }, + 'CELERYD': { + 'AGENT': Option(None, type='string'), + 'AUTOSCALER': Option('celery.worker.autoscale:Autoscaler'), + 'AUTORELOADER': Option('celery.worker.autoreload:Autoreloader'), + 'CONCURRENCY': Option(0, type='int'), + 'TIMER': Option(type='string'), + 'TIMER_PRECISION': Option(1.0, type='float'), + 'FORCE_EXECV': Option(False, type='bool'), + 'HIJACK_ROOT_LOGGER': Option(True, type='bool'), + 'CONSUMER': Option('celery.worker.consumer:Consumer', type='string'), + 'LOG_FORMAT': Option(DEFAULT_PROCESS_LOG_FMT), + 'LOG_COLOR': Option(type='bool'), + 'LOG_LEVEL': Option('WARN', deprecate_by='2.4', remove_by='4.0', + alt='--loglevel argument'), + 'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0', + alt='--logfile argument'), + 'MAX_TASKS_PER_CHILD': Option(type='int'), + 'POOL': Option(DEFAULT_POOL), + 'POOL_PUTLOCKS': Option(True, type='bool'), + 'POOL_RESTARTS': Option(False, type='bool'), + 'PREFETCH_MULTIPLIER': Option(4, type='int'), + 
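# A short sketch of how these Option entries are consumed; ``find``,
# ``flatten`` and ``DEFAULTS`` are defined at the bottom of this module.
from celery.app.defaults import DEFAULTS, find

print(DEFAULTS['CELERYD_PREFETCH_MULTIPLIER'])   # -> 4 (flattened namespace + key)
print(find('prefetch_multiplier'))               # -> searchresult('CELERYD', 'PREFETCH_MULTIPLIER', <Option>)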
'STATE_DB': Option(), + 'TASK_LOG_FORMAT': Option(DEFAULT_TASK_LOG_FMT), + 'TASK_SOFT_TIME_LIMIT': Option(type='float'), + 'TASK_TIME_LIMIT': Option(type='float'), + 'WORKER_LOST_WAIT': Option(10.0, type='float') + }, + 'CELERYBEAT': { + 'SCHEDULE': Option({}, type='dict'), + 'SCHEDULER': Option('celery.beat:PersistentScheduler'), + 'SCHEDULE_FILENAME': Option('celerybeat-schedule'), + 'SYNC_EVERY': Option(0, type='int'), + 'MAX_LOOP_INTERVAL': Option(0, type='float'), + 'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0', + alt='--loglevel argument'), + 'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0', + alt='--logfile argument'), + }, + 'CELERYMON': { + 'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='4.0', + alt='--loglevel argument'), + 'LOG_FILE': Option(deprecate_by='2.4', remove_by='4.0', + alt='--logfile argument'), + 'LOG_FORMAT': Option(DEFAULT_LOG_FMT), + }, + 'EMAIL': { + 'HOST': Option('localhost'), + 'PORT': Option(25, type='int'), + 'HOST_USER': Option(), + 'HOST_PASSWORD': Option(), + 'TIMEOUT': Option(2, type='float'), + 'USE_SSL': Option(False, type='bool'), + 'USE_TLS': Option(False, type='bool'), + }, + 'SERVER_EMAIL': Option('celery@localhost'), + 'ADMINS': Option((), type='tuple'), +} + + +def flatten(d, ns=''): + stack = deque([(ns, d)]) + while stack: + name, space = stack.popleft() + for key, value in items(space): + if isinstance(value, dict): + stack.append((name + key + '_', value)) + else: + yield name + key, value +DEFAULTS = dict((key, value.default) for key, value in flatten(NAMESPACES)) + + +def find_deprecated_settings(source): + from celery.utils import warn_deprecated + for name, opt in flatten(NAMESPACES): + if (opt.deprecate_by or opt.remove_by) and getattr(source, name, None): + warn_deprecated(description='The {0!r} setting'.format(name), + deprecation=opt.deprecate_by, + removal=opt.remove_by, + alternative='Use the {0.alt} instead'.format(opt)) + return source + + +@memoize(maxsize=None) +def find(name, namespace='celery'): + # - Try specified namespace first. + namespace = namespace.upper() + try: + return searchresult( + namespace, name.upper(), NAMESPACES[namespace][name.upper()], + ) + except KeyError: + # - Try all the other namespaces. + for ns, keys in items(NAMESPACES): + if ns.upper() == name.upper(): + return searchresult(None, ns, keys) + elif isinstance(keys, dict): + try: + return searchresult(ns, name.upper(), keys[name.upper()]) + except KeyError: + pass + # - See if name is a qualname last. + return searchresult(None, name.upper(), DEFAULTS[name.upper()]) diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/log.py b/thesisenv/lib/python3.6/site-packages/celery/app/log.py new file mode 100644 index 0000000..3d350e9 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/app/log.py @@ -0,0 +1,257 @@ +# -*- coding: utf-8 -*- +""" + celery.app.log + ~~~~~~~~~~~~~~ + + The Celery instances logging section: ``Celery.log``. + + Sets up logging for the worker and other programs, + redirects stdouts, colors log output, patches logging + related compatibility fixes, and so on. 
+ +""" +from __future__ import absolute_import + +import logging +import os +import sys + +from logging.handlers import WatchedFileHandler + +from kombu.log import NullHandler +from kombu.utils.encoding import set_default_encoding_file + +from celery import signals +from celery._state import get_current_task +from celery.five import class_property, string_t +from celery.utils import isatty, node_format +from celery.utils.log import ( + get_logger, mlevel, + ColorFormatter, ensure_process_aware_logger, + LoggingProxy, get_multiprocessing_logger, + reset_multiprocessing_logger, +) +from celery.utils.term import colored + +__all__ = ['TaskFormatter', 'Logging'] + +MP_LOG = os.environ.get('MP_LOG', False) + + +class TaskFormatter(ColorFormatter): + + def format(self, record): + task = get_current_task() + if task and task.request: + record.__dict__.update(task_id=task.request.id, + task_name=task.name) + else: + record.__dict__.setdefault('task_name', '???') + record.__dict__.setdefault('task_id', '???') + return ColorFormatter.format(self, record) + + +class Logging(object): + #: The logging subsystem is only configured once per process. + #: setup_logging_subsystem sets this flag, and subsequent calls + #: will do nothing. + _setup = False + + def __init__(self, app): + self.app = app + self.loglevel = mlevel(self.app.conf.CELERYD_LOG_LEVEL) + self.format = self.app.conf.CELERYD_LOG_FORMAT + self.task_format = self.app.conf.CELERYD_TASK_LOG_FORMAT + self.colorize = self.app.conf.CELERYD_LOG_COLOR + + def setup(self, loglevel=None, logfile=None, redirect_stdouts=False, + redirect_level='WARNING', colorize=None, hostname=None): + handled = self.setup_logging_subsystem( + loglevel, logfile, colorize=colorize, hostname=hostname, + ) + if not handled: + if redirect_stdouts: + self.redirect_stdouts(redirect_level) + os.environ.update( + CELERY_LOG_LEVEL=str(loglevel) if loglevel else '', + CELERY_LOG_FILE=str(logfile) if logfile else '', + ) + return handled + + def redirect_stdouts(self, loglevel=None, name='celery.redirected'): + self.redirect_stdouts_to_logger( + get_logger(name), loglevel=loglevel + ) + os.environ.update( + CELERY_LOG_REDIRECT='1', + CELERY_LOG_REDIRECT_LEVEL=str(loglevel or ''), + ) + + def setup_logging_subsystem(self, loglevel=None, logfile=None, format=None, + colorize=None, hostname=None, **kwargs): + if self.already_setup: + return + if logfile and hostname: + logfile = node_format(logfile, hostname) + self.already_setup = True + loglevel = mlevel(loglevel or self.loglevel) + format = format or self.format + colorize = self.supports_color(colorize, logfile) + reset_multiprocessing_logger() + ensure_process_aware_logger() + receivers = signals.setup_logging.send( + sender=None, loglevel=loglevel, logfile=logfile, + format=format, colorize=colorize, + ) + + if not receivers: + root = logging.getLogger() + + if self.app.conf.CELERYD_HIJACK_ROOT_LOGGER: + root.handlers = [] + get_logger('celery').handlers = [] + get_logger('celery.task').handlers = [] + get_logger('celery.redirected').handlers = [] + + # Configure root logger + self._configure_logger( + root, logfile, loglevel, format, colorize, **kwargs + ) + + # Configure the multiprocessing logger + self._configure_logger( + get_multiprocessing_logger(), + logfile, loglevel if MP_LOG else logging.ERROR, + format, colorize, **kwargs + ) + + signals.after_setup_logger.send( + sender=None, logger=root, + loglevel=loglevel, logfile=logfile, + format=format, colorize=colorize, + ) + + # then setup the root task logger. 
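# A hedged sketch of bypassing this block entirely: any receiver connected
# to the ``setup_logging`` signal short-circuits the default configuration.
# The ``logging.ini`` file name is illustrative.
from celery import Celery
from celery.signals import setup_logging

app = Celery('example')

@setup_logging.connect
def configure_logging(loglevel=None, logfile=None, format=None,
                      colorize=None, **kwargs):
    import logging.config
    logging.config.fileConfig('logging.ini', disable_existing_loggers=False)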
+ self.setup_task_loggers(loglevel, logfile, colorize=colorize) + + try: + stream = logging.getLogger().handlers[0].stream + except (AttributeError, IndexError): + pass + else: + set_default_encoding_file(stream) + + # This is a hack for multiprocessing's fork+exec, so that + # logging before Process.run works. + logfile_name = logfile if isinstance(logfile, string_t) else '' + os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel), + _MP_FORK_LOGFILE_=logfile_name, + _MP_FORK_LOGFORMAT_=format) + return receivers + + def _configure_logger(self, logger, logfile, loglevel, + format, colorize, **kwargs): + if logger is not None: + self.setup_handlers(logger, logfile, format, + colorize, **kwargs) + if loglevel: + logger.setLevel(loglevel) + + def setup_task_loggers(self, loglevel=None, logfile=None, format=None, + colorize=None, propagate=False, **kwargs): + """Setup the task logger. + + If `logfile` is not specified, then `sys.stderr` is used. + + Will return the base task logger object. + + """ + loglevel = mlevel(loglevel or self.loglevel) + format = format or self.task_format + colorize = self.supports_color(colorize, logfile) + + logger = self.setup_handlers( + get_logger('celery.task'), + logfile, format, colorize, + formatter=TaskFormatter, **kwargs + ) + logger.setLevel(loglevel) + # this is an int for some reason, better not question why. + logger.propagate = int(propagate) + signals.after_setup_task_logger.send( + sender=None, logger=logger, + loglevel=loglevel, logfile=logfile, + format=format, colorize=colorize, + ) + return logger + + def redirect_stdouts_to_logger(self, logger, loglevel=None, + stdout=True, stderr=True): + """Redirect :class:`sys.stdout` and :class:`sys.stderr` to a + logging instance. + + :param logger: The :class:`logging.Logger` instance to redirect to. + :param loglevel: The loglevel redirected messages will be logged as. + + """ + proxy = LoggingProxy(logger, loglevel) + if stdout: + sys.stdout = proxy + if stderr: + sys.stderr = proxy + return proxy + + def supports_color(self, colorize=None, logfile=None): + colorize = self.colorize if colorize is None else colorize + if self.app.IS_WINDOWS: + # Windows does not support ANSI color codes. + return False + if colorize or colorize is None: + # Only use color if there is no active log file + # and stderr is an actual terminal. 
+ return logfile is None and isatty(sys.stderr) + return colorize + + def colored(self, logfile=None, enabled=None): + return colored(enabled=self.supports_color(enabled, logfile)) + + def setup_handlers(self, logger, logfile, format, colorize, + formatter=ColorFormatter, **kwargs): + if self._is_configured(logger): + return logger + handler = self._detect_handler(logfile) + handler.setFormatter(formatter(format, use_color=colorize)) + logger.addHandler(handler) + return logger + + def _detect_handler(self, logfile=None): + """Create log handler with either a filename, an open stream + or :const:`None` (stderr).""" + logfile = sys.__stderr__ if logfile is None else logfile + if hasattr(logfile, 'write'): + return logging.StreamHandler(logfile) + return WatchedFileHandler(logfile) + + def _has_handler(self, logger): + if logger.handlers: + return any(not isinstance(h, NullHandler) for h in logger.handlers) + + def _is_configured(self, logger): + return self._has_handler(logger) and not getattr( + logger, '_rudimentary_setup', False) + + def setup_logger(self, name='celery', *args, **kwargs): + """Deprecated: No longer used.""" + self.setup_logging_subsystem(*args, **kwargs) + return logging.root + + def get_default_logger(self, name='celery', **kwargs): + return get_logger(name) + + @class_property + def already_setup(cls): + return cls._setup + + @already_setup.setter # noqa + def already_setup(cls, was_setup): + cls._setup = was_setup diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/registry.py b/thesisenv/lib/python3.6/site-packages/celery/app/registry.py new file mode 100644 index 0000000..7046554 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/app/registry.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +""" + celery.app.registry + ~~~~~~~~~~~~~~~~~~~ + + Registry of available tasks. + +""" +from __future__ import absolute_import + +import inspect + +from importlib import import_module + +from celery._state import get_current_app +from celery.exceptions import NotRegistered +from celery.five import items + +__all__ = ['TaskRegistry'] + + +class TaskRegistry(dict): + NotRegistered = NotRegistered + + def __missing__(self, key): + raise self.NotRegistered(key) + + def register(self, task): + """Register a task in the task registry. + + The task will be automatically instantiated if not already an + instance. + + """ + self[task.name] = inspect.isclass(task) and task() or task + + def unregister(self, name): + """Unregister task by name. + + :param name: name of the task to unregister, or a + :class:`celery.task.base.Task` with a valid `name` attribute. + + :raises celery.exceptions.NotRegistered: if the task has not + been registered. 
+ + """ + try: + self.pop(getattr(name, 'name', name)) + except KeyError: + raise self.NotRegistered(name) + + # -- these methods are irrelevant now and will be removed in 4.0 + def regular(self): + return self.filter_types('regular') + + def periodic(self): + return self.filter_types('periodic') + + def filter_types(self, type): + return dict((name, task) for name, task in items(self) + if getattr(task, 'type', 'regular') == type) + + +def _unpickle_task(name): + return get_current_app().tasks[name] + + +def _unpickle_task_v2(name, module=None): + if module: + import_module(module) + return get_current_app().tasks[name] diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/routes.py b/thesisenv/lib/python3.6/site-packages/celery/app/routes.py new file mode 100644 index 0000000..b1e7314 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/app/routes.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +""" + celery.routes + ~~~~~~~~~~~~~ + + Contains utilities for working with task routers, + (:setting:`CELERY_ROUTES`). + +""" +from __future__ import absolute_import + +from celery.exceptions import QueueNotFound +from celery.five import string_t +from celery.utils import lpmerge +from celery.utils.functional import firstmethod, mlazy +from celery.utils.imports import instantiate + +__all__ = ['MapRoute', 'Router', 'prepare'] + +_first_route = firstmethod('route_for_task') + + +class MapRoute(object): + """Creates a router out of a :class:`dict`.""" + + def __init__(self, map): + self.map = map + + def route_for_task(self, task, *args, **kwargs): + try: + return dict(self.map[task]) + except KeyError: + pass + except ValueError: + return {'queue': self.map[task]} + + +class Router(object): + + def __init__(self, routes=None, queues=None, + create_missing=False, app=None): + self.app = app + self.queues = {} if queues is None else queues + self.routes = [] if routes is None else routes + self.create_missing = create_missing + + def route(self, options, task, args=(), kwargs={}): + options = self.expand_destination(options) # expands 'queue' + if self.routes: + route = self.lookup_route(task, args, kwargs) + if route: # expands 'queue' in route. + return lpmerge(self.expand_destination(route), options) + if 'queue' not in options: + options = lpmerge(self.expand_destination( + self.app.conf.CELERY_DEFAULT_QUEUE), options) + return options + + def expand_destination(self, route): + # Route can be a queue name: convenient for direct exchanges. + if isinstance(route, string_t): + queue, route = route, {} + else: + # can use defaults from configured queue, but override specific + # things (like the routing_key): great for topic exchanges. 
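# A hedged sketch of the two CELERY_ROUTES forms handled here: a full
# options dict and a bare queue name; task and queue names are illustrative.
from celery.app.routes import prepare

CELERY_ROUTES = {
    'feed.tasks.import_feed': {'queue': 'feeds'},     # dict route
    'application.tasks.send_mail': 'email',           # bare string -> {'queue': 'email'}
}
routers = prepare(CELERY_ROUTES)                       # -> [MapRoute({...})]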
+ queue = route.pop('queue', None) + + if queue: + try: + Q = self.queues[queue] # noqa + except KeyError: + raise QueueNotFound( + 'Queue {0!r} missing from CELERY_QUEUES'.format(queue)) + # needs to be declared by publisher + route['queue'] = Q + return route + + def lookup_route(self, task, args=None, kwargs=None): + return _first_route(self.routes, task, args, kwargs) + + +def prepare(routes): + """Expands the :setting:`CELERY_ROUTES` setting.""" + + def expand_route(route): + if isinstance(route, dict): + return MapRoute(route) + if isinstance(route, string_t): + return mlazy(instantiate, route) + return route + + if routes is None: + return () + if not isinstance(routes, (list, tuple)): + routes = (routes, ) + return [expand_route(route) for route in routes] diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/task.py b/thesisenv/lib/python3.6/site-packages/celery/app/task.py new file mode 100644 index 0000000..3360005 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/app/task.py @@ -0,0 +1,948 @@ +# -*- coding: utf-8 -*- +""" + celery.app.task + ~~~~~~~~~~~~~~~ + + Task Implementation: Task request context, and the base task class. + +""" +from __future__ import absolute_import + +import sys + +from billiard.einfo import ExceptionInfo + +from celery import current_app +from celery import states +from celery._state import _task_stack +from celery.canvas import signature +from celery.exceptions import MaxRetriesExceededError, Reject, Retry +from celery.five import class_property, items, with_metaclass +from celery.local import Proxy +from celery.result import EagerResult +from celery.utils import gen_task_name, fun_takes_kwargs, uuid, maybe_reraise +from celery.utils.functional import mattrgetter, maybe_list +from celery.utils.imports import instantiate +from celery.utils.mail import ErrorMail + +from .annotations import resolve_all as resolve_all_annotations +from .registry import _unpickle_task_v2 +from .utils import appstr + +__all__ = ['Context', 'Task'] + +#: extracts attributes related to publishing a message from an object. 
+extract_exec_options = mattrgetter( + 'queue', 'routing_key', 'exchange', 'priority', 'expires', + 'serializer', 'delivery_mode', 'compression', 'time_limit', + 'soft_time_limit', 'immediate', 'mandatory', # imm+man is deprecated +) + +# We take __repr__ very seriously around here ;) +R_BOUND_TASK = '' +R_UNBOUND_TASK = '' +R_SELF_TASK = '<@task {0.name} bound to other {0.__self__}>' +R_INSTANCE = '<@task: {0.name} of {app}{flags}>' + + +class _CompatShared(object): + + def __init__(self, name, cons): + self.name = name + self.cons = cons + + def __hash__(self): + return hash(self.name) + + def __repr__(self): + return '' % (self.name, ) + + def __call__(self, app): + return self.cons(app) + + +def _strflags(flags, default=''): + if flags: + return ' ({0})'.format(', '.join(flags)) + return default + + +def _reprtask(task, fmt=None, flags=None): + flags = list(flags) if flags is not None else [] + flags.append('v2 compatible') if task.__v2_compat__ else None + if not fmt: + fmt = R_BOUND_TASK if task._app else R_UNBOUND_TASK + return fmt.format( + task, flags=_strflags(flags), + app=appstr(task._app) if task._app else None, + ) + + +class Context(object): + # Default context + logfile = None + loglevel = None + hostname = None + id = None + args = None + kwargs = None + retries = 0 + eta = None + expires = None + is_eager = False + headers = None + delivery_info = None + reply_to = None + correlation_id = None + taskset = None # compat alias to group + group = None + chord = None + utc = None + called_directly = True + callbacks = None + errbacks = None + timelimit = None + _children = None # see property + _protected = 0 + + def __init__(self, *args, **kwargs): + self.update(*args, **kwargs) + + def update(self, *args, **kwargs): + return self.__dict__.update(*args, **kwargs) + + def clear(self): + return self.__dict__.clear() + + def get(self, key, default=None): + return getattr(self, key, default) + + def __repr__(self): + return ''.format(vars(self)) + + @property + def children(self): + # children must be an empy list for every thread + if self._children is None: + self._children = [] + return self._children + + +class TaskType(type): + """Meta class for tasks. + + Automatically registers the task in the task registry (except + if the :attr:`Task.abstract`` attribute is set). + + If no :attr:`Task.name` attribute is provided, then the name is generated + from the module and class name. + + """ + _creation_count = {} # used by old non-abstract task classes + + def __new__(cls, name, bases, attrs): + new = super(TaskType, cls).__new__ + task_module = attrs.get('__module__') or '__main__' + + # - Abstract class: abstract attribute should not be inherited. + abstract = attrs.pop('abstract', None) + if abstract or not attrs.get('autoregister', True): + return new(cls, name, bases, attrs) + + # The 'app' attribute is now a property, with the real app located + # in the '_app' attribute. Previously this was a regular attribute, + # so we should support classes defining it. + app = attrs.pop('_app', None) or attrs.pop('app', None) + + # Attempt to inherit app from one the bases + if not isinstance(app, Proxy) and app is None: + for base in bases: + if getattr(base, '_app', None): + app = base._app + break + else: + app = current_app._get_current_object() + attrs['_app'] = app + + # - Automatically generate missing/empty name. 
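# A hedged sketch of the automatic naming performed just below: with no
# explicit ``name``, gen_task_name() derives one from the defining module.
# The module path 'application.tasks' is illustrative.
from celery import Celery

app = Celery('example')

@app.task                      # no name= argument
def send_mail(to):
    return to

print(send_mail.name)          # -> 'application.tasks.send_mail' when
                               #    defined in application/tasks.py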
+ task_name = attrs.get('name') + if not task_name: + attrs['name'] = task_name = gen_task_name(app, name, task_module) + + if not attrs.get('_decorated'): + # non decorated tasks must also be shared in case + # an app is created multiple times due to modules + # imported under multiple names. + # Hairy stuff, here to be compatible with 2.x. + # People should not use non-abstract task classes anymore, + # use the task decorator. + from celery._state import connect_on_app_finalize + unique_name = '.'.join([task_module, name]) + if unique_name not in cls._creation_count: + # the creation count is used as a safety + # so that the same task is not added recursively + # to the set of constructors. + cls._creation_count[unique_name] = 1 + connect_on_app_finalize(_CompatShared( + unique_name, + lambda app: TaskType.__new__(cls, name, bases, + dict(attrs, _app=app)), + )) + + # - Create and register class. + # Because of the way import happens (recursively) + # we may or may not be the first time the task tries to register + # with the framework. There should only be one class for each task + # name, so we always return the registered version. + tasks = app._tasks + if task_name not in tasks: + tasks.register(new(cls, name, bases, attrs)) + instance = tasks[task_name] + instance.bind(app) + return instance.__class__ + + def __repr__(cls): + return _reprtask(cls) + + +@with_metaclass(TaskType) +class Task(object): + """Task base class. + + When called tasks apply the :meth:`run` method. This method must + be defined by all tasks (that is unless the :meth:`__call__` method + is overridden). + + """ + __trace__ = None + __v2_compat__ = False # set by old base in celery.task.base + + ErrorMail = ErrorMail + MaxRetriesExceededError = MaxRetriesExceededError + + #: Execution strategy used, or the qualified name of one. + Strategy = 'celery.worker.strategy:default' + + #: This is the instance bound to if the task is a method of a class. + __self__ = None + + #: The application instance associated with this task class. + _app = None + + #: Name of the task. + name = None + + #: If :const:`True` the task is an abstract base class. + abstract = True + + #: If disabled the worker will not forward magic keyword arguments. + #: Deprecated and scheduled for removal in v4.0. + accept_magic_kwargs = False + + #: Maximum number of retries before giving up. If set to :const:`None`, + #: it will **never** stop retrying. + max_retries = 3 + + #: Default time in seconds before a retry of the task should be + #: executed. 3 minutes by default. + default_retry_delay = 3 * 60 + + #: Rate limit for this task type. Examples: :const:`None` (no rate + #: limit), `'100/s'` (hundred tasks a second), `'100/m'` (hundred tasks + #: a minute),`'100/h'` (hundred tasks an hour) + rate_limit = None + + #: If enabled the worker will not store task state and return values + #: for this task. Defaults to the :setting:`CELERY_IGNORE_RESULT` + #: setting. + ignore_result = None + + #: If enabled the request will keep track of subtasks started by + #: this task, and this information will be sent with the result + #: (``result.children``). + trail = True + + #: If enabled the worker will send monitoring events related to + #: this task (but only if the worker is configured to send + #: task related events). + #: Note that this has no effect on the task-failure event case + #: where a task is not registered (as it will have no task class + #: to check this flag). 
+ send_events = True + + #: When enabled errors will be stored even if the task is otherwise + #: configured to ignore results. + store_errors_even_if_ignored = None + + #: If enabled an email will be sent to :setting:`ADMINS` whenever a task + #: of this type fails. + send_error_emails = None + + #: The name of a serializer that are registered with + #: :mod:`kombu.serialization.registry`. Default is `'pickle'`. + serializer = None + + #: Hard time limit. + #: Defaults to the :setting:`CELERYD_TASK_TIME_LIMIT` setting. + time_limit = None + + #: Soft time limit. + #: Defaults to the :setting:`CELERYD_TASK_SOFT_TIME_LIMIT` setting. + soft_time_limit = None + + #: The result store backend used for this task. + backend = None + + #: If disabled this task won't be registered automatically. + autoregister = True + + #: If enabled the task will report its status as 'started' when the task + #: is executed by a worker. Disabled by default as the normal behaviour + #: is to not report that level of granularity. Tasks are either pending, + #: finished, or waiting to be retried. + #: + #: Having a 'started' status can be useful for when there are long + #: running tasks and there is a need to report which task is currently + #: running. + #: + #: The application default can be overridden using the + #: :setting:`CELERY_TRACK_STARTED` setting. + track_started = None + + #: When enabled messages for this task will be acknowledged **after** + #: the task has been executed, and not *just before* which is the + #: default behavior. + #: + #: Please note that this means the task may be executed twice if the + #: worker crashes mid execution (which may be acceptable for some + #: applications). + #: + #: The application default can be overridden with the + #: :setting:`CELERY_ACKS_LATE` setting. + acks_late = None + + #: Tuple of expected exceptions. + #: + #: These are errors that are expected in normal operation + #: and that should not be regarded as a real error by the worker. + #: Currently this means that the state will be updated to an error + #: state, but the worker will not log the event as an error. + throws = () + + #: Default task expiry time. + expires = None + + #: Some may expect a request to exist even if the task has not been + #: called. This should probably be deprecated. + _default_request = None + + _exec_options = None + + __bound__ = False + + from_config = ( + ('send_error_emails', 'CELERY_SEND_TASK_ERROR_EMAILS'), + ('serializer', 'CELERY_TASK_SERIALIZER'), + ('rate_limit', 'CELERY_DEFAULT_RATE_LIMIT'), + ('track_started', 'CELERY_TRACK_STARTED'), + ('acks_late', 'CELERY_ACKS_LATE'), + ('ignore_result', 'CELERY_IGNORE_RESULT'), + ('store_errors_even_if_ignored', + 'CELERY_STORE_ERRORS_EVEN_IF_IGNORED'), + ) + + _backend = None # set by backend property. + + __bound__ = False + + # - Tasks are lazily bound, so that configuration is not set + # - until the task is actually used + + @classmethod + def bind(self, app): + was_bound, self.__bound__ = self.__bound__, True + self._app = app + conf = app.conf + self._exec_options = None # clear option cache + + for attr_name, config_name in self.from_config: + if getattr(self, attr_name, None) is None: + setattr(self, attr_name, conf[config_name]) + if self.accept_magic_kwargs is None: + self.accept_magic_kwargs = app.accept_magic_kwargs + + # decorate with annotations from config. 
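All of the attributes documented above can also be set per task; ``bind()`` only falls back to the ``CELERY_*`` settings listed in ``from_config`` when an attribute is still :const:`None`. A sketch using the decorator form (the ``proj.celery`` module is the hypothetical layout also used in the docstrings below, and the option values are illustrative)::

    from proj.celery import app   # hypothetical project layout

    @app.task(bind=True, max_retries=5, default_retry_delay=30,
              rate_limit='10/m', acks_late=True, ignore_result=True,
              soft_time_limit=60, time_limit=120)
    def resize_image(self, path):
        # ``self`` is the task instance because bind=True was passed.
        return path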
+ if not was_bound: + self.annotate() + + from celery.utils.threads import LocalStack + self.request_stack = LocalStack() + + # PeriodicTask uses this to add itself to the PeriodicTask schedule. + self.on_bound(app) + + return app + + @classmethod + def on_bound(self, app): + """This method can be defined to do additional actions when the + task class is bound to an app.""" + pass + + @classmethod + def _get_app(self): + if self._app is None: + self._app = current_app + if not self.__bound__: + # The app property's __set__ method is not called + # if Task.app is set (on the class), so must bind on use. + self.bind(self._app) + return self._app + app = class_property(_get_app, bind) + + @classmethod + def annotate(self): + for d in resolve_all_annotations(self.app.annotations, self): + for key, value in items(d): + if key.startswith('@'): + self.add_around(key[1:], value) + else: + setattr(self, key, value) + + @classmethod + def add_around(self, attr, around): + orig = getattr(self, attr) + if getattr(orig, '__wrapped__', None): + orig = orig.__wrapped__ + meth = around(orig) + meth.__wrapped__ = orig + setattr(self, attr, meth) + + def __call__(self, *args, **kwargs): + _task_stack.push(self) + self.push_request() + try: + # add self if this is a bound task + if self.__self__ is not None: + return self.run(self.__self__, *args, **kwargs) + return self.run(*args, **kwargs) + finally: + self.pop_request() + _task_stack.pop() + + def __reduce__(self): + # - tasks are pickled into the name of the task only, and the reciever + # - simply grabs it from the local registry. + # - in later versions the module of the task is also included, + # - and the receiving side tries to import that module so that + # - it will work even if the task has not been registered. + mod = type(self).__module__ + mod = mod if mod and mod in sys.modules else None + return (_unpickle_task_v2, (self.name, mod), None) + + def run(self, *args, **kwargs): + """The body of the task executed by workers.""" + raise NotImplementedError('Tasks must define the run method.') + + def start_strategy(self, app, consumer, **kwargs): + return instantiate(self.Strategy, self, app, consumer, **kwargs) + + def delay(self, *args, **kwargs): + """Star argument version of :meth:`apply_async`. + + Does not support the extra options enabled by :meth:`apply_async`. + + :param \*args: positional arguments passed on to the task. + :param \*\*kwargs: keyword arguments passed on to the task. + + :returns :class:`celery.result.AsyncResult`: + + """ + return self.apply_async(args, kwargs) + + def apply_async(self, args=None, kwargs=None, task_id=None, producer=None, + link=None, link_error=None, **options): + """Apply tasks asynchronously by sending a message. + + :keyword args: The positional arguments to pass on to the + task (a :class:`list` or :class:`tuple`). + + :keyword kwargs: The keyword arguments to pass on to the + task (a :class:`dict`) + + :keyword countdown: Number of seconds into the future that the + task should execute. Defaults to immediate + execution. + + :keyword eta: A :class:`~datetime.datetime` object describing + the absolute time and date of when the task should + be executed. May not be specified if `countdown` + is also supplied. + + :keyword expires: Either a :class:`int`, describing the number of + seconds, or a :class:`~datetime.datetime` object + that describes the absolute time and date of when + the task should expire. The task will not be + executed after the expiration time. 
+ + :keyword connection: Re-use existing broker connection instead + of establishing a new one. + + :keyword retry: If enabled sending of the task message will be retried + in the event of connection loss or failure. Default + is taken from the :setting:`CELERY_TASK_PUBLISH_RETRY` + setting. Note that you need to handle the + producer/connection manually for this to work. + + :keyword retry_policy: Override the retry policy used. See the + :setting:`CELERY_TASK_PUBLISH_RETRY_POLICY` + setting. + + :keyword routing_key: Custom routing key used to route the task to a + worker server. If in combination with a + ``queue`` argument only used to specify custom + routing keys to topic exchanges. + + :keyword queue: The queue to route the task to. This must be a key + present in :setting:`CELERY_QUEUES`, or + :setting:`CELERY_CREATE_MISSING_QUEUES` must be + enabled. See :ref:`guide-routing` for more + information. + + :keyword exchange: Named custom exchange to send the task to. + Usually not used in combination with the ``queue`` + argument. + + :keyword priority: The task priority, a number between 0 and 9. + Defaults to the :attr:`priority` attribute. + + :keyword serializer: A string identifying the default + serialization method to use. Can be `pickle`, + `json`, `yaml`, `msgpack` or any custom + serialization method that has been registered + with :mod:`kombu.serialization.registry`. + Defaults to the :attr:`serializer` attribute. + + :keyword compression: A string identifying the compression method + to use. Can be one of ``zlib``, ``bzip2``, + or any custom compression methods registered with + :func:`kombu.compression.register`. Defaults to + the :setting:`CELERY_MESSAGE_COMPRESSION` + setting. + :keyword link: A single, or a list of tasks to apply if the + task exits successfully. + :keyword link_error: A single, or a list of tasks to apply + if an error occurs while executing the task. + + :keyword producer: :class:~@amqp.TaskProducer` instance to use. + + :keyword add_to_parent: If set to True (default) and the task + is applied while executing another task, then the result + will be appended to the parent tasks ``request.children`` + attribute. Trailing can also be disabled by default using the + :attr:`trail` attribute + + :keyword publisher: Deprecated alias to ``producer``. + + :keyword headers: Message headers to be sent in the + task (a :class:`dict`) + + :rtype :class:`celery.result.AsyncResult`: if + :setting:`CELERY_ALWAYS_EAGER` is not set, otherwise + :class:`celery.result.EagerResult`. + + Also supports all keyword arguments supported by + :meth:`kombu.Producer.publish`. + + .. note:: + If the :setting:`CELERY_ALWAYS_EAGER` setting is set, it will + be replaced by a local :func:`apply` call instead. + + """ + app = self._get_app() + if app.conf.CELERY_ALWAYS_EAGER: + return self.apply(args, kwargs, task_id=task_id or uuid(), + link=link, link_error=link_error, **options) + # add 'self' if this is a "task_method". 
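Put together, the options documented above translate into calls like the following; the task is the ``resize_image`` sketch from earlier, and the queue name and timings are made up::

    result = resize_image.apply_async(
        args=('photo.jpg',),
        countdown=10,      # start no earlier than ten seconds from now
        expires=600,       # discard if not started within ten minutes
        queue='images',    # must be in CELERY_QUEUES or auto-created
        retry=True,        # retry publishing if the connection drops
    )
    print(result.id)

    # delay() is the star-argument shortcut without execution options:
    resize_image.delay('photo.jpg')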
+ if self.__self__ is not None: + args = args if isinstance(args, tuple) else tuple(args or ()) + args = (self.__self__, ) + args + return app.send_task( + self.name, args, kwargs, task_id=task_id, producer=producer, + link=link, link_error=link_error, result_cls=self.AsyncResult, + **dict(self._get_exec_options(), **options) + ) + + def subtask_from_request(self, request=None, args=None, kwargs=None, + queue=None, **extra_options): + request = self.request if request is None else request + args = request.args if args is None else args + kwargs = request.kwargs if kwargs is None else kwargs + limit_hard, limit_soft = request.timelimit or (None, None) + options = { + 'task_id': request.id, + 'link': request.callbacks, + 'link_error': request.errbacks, + 'group_id': request.group, + 'chord': request.chord, + 'soft_time_limit': limit_soft, + 'time_limit': limit_hard, + 'reply_to': request.reply_to, + 'headers': request.headers, + } + options.update( + {'queue': queue} if queue else (request.delivery_info or {}) + ) + return self.subtask(args, kwargs, options, type=self, **extra_options) + + def retry(self, args=None, kwargs=None, exc=None, throw=True, + eta=None, countdown=None, max_retries=None, **options): + """Retry the task. + + :param args: Positional arguments to retry with. + :param kwargs: Keyword arguments to retry with. + :keyword exc: Custom exception to report when the max restart + limit has been exceeded (default: + :exc:`~@MaxRetriesExceededError`). + + If this argument is set and retry is called while + an exception was raised (``sys.exc_info()`` is set) + it will attempt to reraise the current exception. + + If no exception was raised it will raise the ``exc`` + argument provided. + :keyword countdown: Time in seconds to delay the retry for. + :keyword eta: Explicit time and date to run the retry at + (must be a :class:`~datetime.datetime` instance). + :keyword max_retries: If set, overrides the default retry limit for + this execution. Changes to this parameter do not propagate to + subsequent task retry attempts. A value of :const:`None`, means + "use the default", so if you want infinite retries you would + have to set the :attr:`max_retries` attribute of the task to + :const:`None` first. + :keyword time_limit: If set, overrides the default time limit. + :keyword soft_time_limit: If set, overrides the default soft + time limit. + :keyword \*\*options: Any extra options to pass on to + meth:`apply_async`. + :keyword throw: If this is :const:`False`, do not raise the + :exc:`~@Retry` exception, + that tells the worker to mark the task as being + retried. Note that this means the task will be + marked as failed if the task raises an exception, + or successful if it returns. + + :raises celery.exceptions.Retry: To tell the worker that + the task has been re-sent for retry. This always happens, + unless the `throw` keyword argument has been explicitly set + to :const:`False`, and is considered normal operation. + + **Example** + + .. code-block:: python + + >>> from imaginary_twitter_lib import Twitter + >>> from proj.celery import app + + >>> @app.task(bind=True) + ... def tweet(self, auth, message): + ... twitter = Twitter(oauth=auth) + ... try: + ... twitter.post_status_update(message) + ... except twitter.FailWhale as exc: + ... # Retry in 5 minutes. + ... 
raise self.retry(countdown=60 * 5, exc=exc) + + Although the task will never return above as `retry` raises an + exception to notify the worker, we use `raise` in front of the retry + to convey that the rest of the block will not be executed. + + """ + request = self.request + retries = request.retries + 1 + max_retries = self.max_retries if max_retries is None else max_retries + + # Not in worker or emulated by (apply/always_eager), + # so just raise the original exception. + if request.called_directly: + maybe_reraise() # raise orig stack if PyErr_Occurred + raise exc or Retry('Task can be retried', None) + + if not eta and countdown is None: + countdown = self.default_retry_delay + + is_eager = request.is_eager + S = self.subtask_from_request( + request, args, kwargs, + countdown=countdown, eta=eta, retries=retries, + **options + ) + + if max_retries is not None and retries > max_retries: + if exc: + # first try to reraise the original exception + maybe_reraise() + # or if not in an except block then raise the custom exc. + raise exc + raise self.MaxRetriesExceededError( + "Can't retry {0}[{1}] args:{2} kwargs:{3}".format( + self.name, request.id, S.args, S.kwargs)) + + ret = Retry(exc=exc, when=eta or countdown) + + if is_eager: + # if task was executed eagerly using apply(), + # then the retry must also be executed eagerly. + S.apply().get() + return ret + + try: + S.apply_async() + except Exception as exc: + raise Reject(exc, requeue=False) + if throw: + raise ret + return ret + + def apply(self, args=None, kwargs=None, + link=None, link_error=None, **options): + """Execute this task locally, by blocking until the task returns. + + :param args: positional arguments passed on to the task. + :param kwargs: keyword arguments passed on to the task. + :keyword throw: Re-raise task exceptions. Defaults to + the :setting:`CELERY_EAGER_PROPAGATES_EXCEPTIONS` + setting. + + :rtype :class:`celery.result.EagerResult`: + + """ + # trace imports Task, so need to import inline. + from celery.app.trace import eager_trace_task + + app = self._get_app() + args = args or () + # add 'self' if this is a bound method. + if self.__self__ is not None: + args = (self.__self__, ) + tuple(args) + kwargs = kwargs or {} + task_id = options.get('task_id') or uuid() + retries = options.get('retries', 0) + throw = app.either('CELERY_EAGER_PROPAGATES_EXCEPTIONS', + options.pop('throw', None)) + + # Make sure we get the task instance, not class. 
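The ``apply()`` method being defined here is the eager, in-process path: no message is published and an :class:`EagerResult` is returned directly, which is also where ``CELERY_ALWAYS_EAGER`` routes ``apply_async()``. A small usage sketch, reusing the hypothetical ``resize_image`` task::

    res = resize_image.apply(args=('photo.jpg',), throw=True)
    print(res.status)   # 'SUCCESS', or the failure state
    print(res.get())    # the return value; re-raises the exception on failure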
+ task = app._tasks[self.name] + + request = {'id': task_id, + 'retries': retries, + 'is_eager': True, + 'logfile': options.get('logfile'), + 'loglevel': options.get('loglevel', 0), + 'callbacks': maybe_list(link), + 'errbacks': maybe_list(link_error), + 'headers': options.get('headers'), + 'delivery_info': {'is_eager': True}} + if self.accept_magic_kwargs: + default_kwargs = {'task_name': task.name, + 'task_id': task_id, + 'task_retries': retries, + 'task_is_eager': True, + 'logfile': options.get('logfile'), + 'loglevel': options.get('loglevel', 0), + 'delivery_info': {'is_eager': True}} + supported_keys = fun_takes_kwargs(task.run, default_kwargs) + extend_with = dict((key, val) + for key, val in items(default_kwargs) + if key in supported_keys) + kwargs.update(extend_with) + + tb = None + retval, info = eager_trace_task(task, task_id, args, kwargs, + app=self._get_app(), + request=request, propagate=throw) + if isinstance(retval, ExceptionInfo): + retval, tb = retval.exception, retval.traceback + state = states.SUCCESS if info is None else info.state + return EagerResult(task_id, retval, state, traceback=tb) + + def AsyncResult(self, task_id, **kwargs): + """Get AsyncResult instance for this kind of task. + + :param task_id: Task id to get result for. + + """ + return self._get_app().AsyncResult(task_id, backend=self.backend, + task_name=self.name, **kwargs) + + def subtask(self, args=None, *starargs, **starkwargs): + """Return :class:`~celery.signature` object for + this task, wrapping arguments and execution options + for a single task invocation.""" + starkwargs.setdefault('app', self.app) + return signature(self, args, *starargs, **starkwargs) + + def s(self, *args, **kwargs): + """``.s(*a, **k) -> .subtask(a, k)``""" + return self.subtask(args, kwargs) + + def si(self, *args, **kwargs): + """``.si(*a, **k) -> .subtask(a, k, immutable=True)``""" + return self.subtask(args, kwargs, immutable=True) + + def chunks(self, it, n): + """Creates a :class:`~celery.canvas.chunks` task for this task.""" + from celery import chunks + return chunks(self.s(), it, n, app=self.app) + + def map(self, it): + """Creates a :class:`~celery.canvas.xmap` task from ``it``.""" + from celery import xmap + return xmap(self.s(), it, app=self.app) + + def starmap(self, it): + """Creates a :class:`~celery.canvas.xstarmap` task from ``it``.""" + from celery import xstarmap + return xstarmap(self.s(), it, app=self.app) + + def send_event(self, type_, **fields): + req = self.request + with self.app.events.default_dispatcher(hostname=req.hostname) as d: + return d.send(type_, uuid=req.id, **fields) + + def update_state(self, task_id=None, state=None, meta=None): + """Update task state. + + :keyword task_id: Id of the task to update, defaults to the + id of the current task + :keyword state: New state (:class:`str`). + :keyword meta: State metadata (:class:`dict`). + + + + """ + if task_id is None: + task_id = self.request.id + self.backend.store_result(task_id, meta, state) + + def on_success(self, retval, task_id, args, kwargs): + """Success handler. + + Run by the worker if the task executes successfully. + + :param retval: The return value of the task. + :param task_id: Unique id of the executed task. + :param args: Original arguments for the executed task. + :param kwargs: Original keyword arguments for the executed task. + + The return value of this handler is ignored. + + """ + pass + + def on_retry(self, exc, task_id, args, kwargs, einfo): + """Retry handler. 
+ + This is run by the worker when the task is to be retried. + + :param exc: The exception sent to :meth:`retry`. + :param task_id: Unique id of the retried task. + :param args: Original arguments for the retried task. + :param kwargs: Original keyword arguments for the retried task. + + :keyword einfo: :class:`~billiard.einfo.ExceptionInfo` + instance, containing the traceback. + + The return value of this handler is ignored. + + """ + pass + + def on_failure(self, exc, task_id, args, kwargs, einfo): + """Error handler. + + This is run by the worker when the task fails. + + :param exc: The exception raised by the task. + :param task_id: Unique id of the failed task. + :param args: Original arguments for the task that failed. + :param kwargs: Original keyword arguments for the task + that failed. + + :keyword einfo: :class:`~billiard.einfo.ExceptionInfo` + instance, containing the traceback. + + The return value of this handler is ignored. + + """ + pass + + def after_return(self, status, retval, task_id, args, kwargs, einfo): + """Handler called after the task returns. + + :param status: Current task state. + :param retval: Task return value/exception. + :param task_id: Unique id of the task. + :param args: Original arguments for the task. + :param kwargs: Original keyword arguments for the task. + + :keyword einfo: :class:`~billiard.einfo.ExceptionInfo` + instance, containing the traceback (if any). + + The return value of this handler is ignored. + + """ + pass + + def send_error_email(self, context, exc, **kwargs): + if self.send_error_emails and \ + not getattr(self, 'disable_error_emails', None): + self.ErrorMail(self, **kwargs).send(context, exc) + + def add_trail(self, result): + if self.trail: + self.request.children.append(result) + return result + + def push_request(self, *args, **kwargs): + self.request_stack.push(Context(*args, **kwargs)) + + def pop_request(self): + self.request_stack.pop() + + def __repr__(self): + """`repr(task)`""" + return _reprtask(self, R_SELF_TASK if self.__self__ else R_INSTANCE) + + def _get_request(self): + """Get current request object.""" + req = self.request_stack.top + if req is None: + # task was not called, but some may still expect a request + # to be there, perhaps that should be deprecated. + if self._default_request is None: + self._default_request = Context() + return self._default_request + return req + request = property(_get_request) + + def _get_exec_options(self): + if self._exec_options is None: + self._exec_options = extract_exec_options(self) + return self._exec_options + + @property + def backend(self): + backend = self._backend + if backend is None: + return self.app.backend + return backend + + @backend.setter + def backend(self, value): # noqa + self._backend = value + + @property + def __name__(self): + return self.__class__.__name__ +BaseTask = Task # compat alias diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/trace.py b/thesisenv/lib/python3.6/site-packages/celery/app/trace.py new file mode 100644 index 0000000..feea0e8 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/app/trace.py @@ -0,0 +1,441 @@ +# -*- coding: utf-8 -*- +""" + celery.app.trace + ~~~~~~~~~~~~~~~~ + + This module defines how the task execution is traced: + errors are recorded, handlers are applied and so on. + +""" +from __future__ import absolute_import + +# ## --- +# This is the heart of the worker, the inner loop so to speak. 
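The ``on_retry``/``on_failure``/``after_return`` hooks documented above are commonly overridden on a shared abstract base class. A sketch under the same assumed ``proj.celery`` layout (class and task names are invented, the handlers only log)::

    import logging

    from proj.celery import app   # hypothetical project layout

    class BaseTaskWithHandlers(app.Task):
        abstract = True   # abstract base classes are not registered themselves

        def on_failure(self, exc, task_id, args, kwargs, einfo):
            logging.error('%s[%s] failed: %r', self.name, task_id, exc)

        def on_retry(self, exc, task_id, args, kwargs, einfo):
            logging.warning('%s[%s] will be retried: %r', self.name, task_id, exc)

        def after_return(self, status, retval, task_id, args, kwargs, einfo):
            logging.info('%s[%s] returned in state %s', self.name, task_id, status)

    @app.task(base=BaseTaskWithHandlers)
    def flaky_job(n):
        return n * 2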
+# It used to be split up into nice little classes and methods, +# but in the end it only resulted in bad performance and horrible tracebacks, +# so instead we now use one closure per task class. + +import os +import socket +import sys + +from warnings import warn + +from billiard.einfo import ExceptionInfo +from kombu.exceptions import EncodeError +from kombu.utils import kwdict + +from celery import current_app, group +from celery import states, signals +from celery._state import _task_stack +from celery.app import set_default_app +from celery.app.task import Task as BaseTask, Context +from celery.exceptions import Ignore, Reject, Retry +from celery.utils.log import get_logger +from celery.utils.objects import mro_lookup +from celery.utils.serialization import ( + get_pickleable_exception, + get_pickleable_etype, +) + +__all__ = ['TraceInfo', 'build_tracer', 'trace_task', 'eager_trace_task', + 'setup_worker_optimizations', 'reset_worker_optimizations'] + +_logger = get_logger(__name__) + +send_prerun = signals.task_prerun.send +send_postrun = signals.task_postrun.send +send_success = signals.task_success.send +STARTED = states.STARTED +SUCCESS = states.SUCCESS +IGNORED = states.IGNORED +REJECTED = states.REJECTED +RETRY = states.RETRY +FAILURE = states.FAILURE +EXCEPTION_STATES = states.EXCEPTION_STATES +IGNORE_STATES = frozenset([IGNORED, RETRY, REJECTED]) + +#: set by :func:`setup_worker_optimizations` +_tasks = None +_patched = {} + + +def task_has_custom(task, attr): + """Return true if the task or one of its bases + defines ``attr`` (excluding the one in BaseTask).""" + return mro_lookup(task.__class__, attr, stop=(BaseTask, object), + monkey_patched=['celery.app.task']) + + +class TraceInfo(object): + __slots__ = ('state', 'retval') + + def __init__(self, state, retval=None): + self.state = state + self.retval = retval + + def handle_error_state(self, task, eager=False): + store_errors = not eager + if task.ignore_result: + store_errors = task.store_errors_even_if_ignored + + return { + RETRY: self.handle_retry, + FAILURE: self.handle_failure, + }[self.state](task, store_errors=store_errors) + + def handle_retry(self, task, store_errors=True): + """Handle retry exception.""" + # the exception raised is the Retry semi-predicate, + # and it's exc' attribute is the original exception raised (if any). 
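``handle_retry()`` and ``handle_failure()`` below are also where the ``task_retry`` and ``task_failure`` signals are dispatched from; receivers can be attached like this (the handler bodies are illustrative)::

    from celery import signals

    @signals.task_retry.connect
    def log_retry(sender=None, request=None, reason=None, einfo=None, **kwargs):
        # ``sender`` is the task instance, ``reason`` the Retry value from retry().
        print('retrying {0}[{1}]: {2!r}'.format(sender.name, request.id, reason))

    @signals.task_failure.connect
    def log_failure(sender=None, task_id=None, exception=None, einfo=None, **kwargs):
        print('{0}[{1}] failed: {2!r}'.format(sender.name, task_id, exception))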
+ req = task.request + type_, _, tb = sys.exc_info() + try: + reason = self.retval + einfo = ExceptionInfo((type_, reason, tb)) + if store_errors: + task.backend.mark_as_retry( + req.id, reason.exc, einfo.traceback, request=req, + ) + task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo) + signals.task_retry.send(sender=task, request=req, + reason=reason, einfo=einfo) + return einfo + finally: + del(tb) + + def handle_failure(self, task, store_errors=True): + """Handle exception.""" + req = task.request + type_, _, tb = sys.exc_info() + try: + exc = self.retval + einfo = ExceptionInfo() + einfo.exception = get_pickleable_exception(einfo.exception) + einfo.type = get_pickleable_etype(einfo.type) + if store_errors: + task.backend.mark_as_failure( + req.id, exc, einfo.traceback, request=req, + ) + task.on_failure(exc, req.id, req.args, req.kwargs, einfo) + signals.task_failure.send(sender=task, task_id=req.id, + exception=exc, args=req.args, + kwargs=req.kwargs, + traceback=tb, + einfo=einfo) + return einfo + finally: + del(tb) + + +def build_tracer(name, task, loader=None, hostname=None, store_errors=True, + Info=TraceInfo, eager=False, propagate=False, app=None, + IGNORE_STATES=IGNORE_STATES): + """Return a function that traces task execution; catches all + exceptions and updates result backend with the state and result + + If the call was successful, it saves the result to the task result + backend, and sets the task status to `"SUCCESS"`. + + If the call raises :exc:`~@Retry`, it extracts + the original exception, uses that as the result and sets the task state + to `"RETRY"`. + + If the call results in an exception, it saves the exception as the task + result, and sets the task state to `"FAILURE"`. + + Return a function that takes the following arguments: + + :param uuid: The id of the task. + :param args: List of positional args to pass on to the function. + :param kwargs: Keyword arguments mapping to pass on to the function. + :keyword request: Request dict. + + """ + # If the task doesn't define a custom __call__ method + # we optimize it away by simply calling the run method directly, + # saving the extra method call and a line less in the stack trace. 
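Because of this optimization, only tasks that actually define ``__call__`` keep the extra frame; a custom base class that does so still works, and the stack protection installed further down keeps the ``super()`` call from pushing the request twice. A purely illustrative timing wrapper, reusing the hypothetical app from the earlier sketches::

    import logging
    import time

    from proj.celery import app   # hypothetical project layout

    class TimedTask(app.Task):
        abstract = True

        def __call__(self, *args, **kwargs):
            start = time.time()
            try:
                return super(TimedTask, self).__call__(*args, **kwargs)
            finally:
                logging.info('%s took %.3fs', self.name, time.time() - start)

    @app.task(base=TimedTask)
    def crunch(n):
        return sum(range(n))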
+ fun = task if task_has_custom(task, '__call__') else task.run + + loader = loader or app.loader + backend = task.backend + ignore_result = task.ignore_result + track_started = task.track_started + track_started = not eager and (task.track_started and not ignore_result) + publish_result = not eager and not ignore_result + hostname = hostname or socket.gethostname() + + loader_task_init = loader.on_task_init + loader_cleanup = loader.on_process_cleanup + + task_on_success = None + task_after_return = None + if task_has_custom(task, 'on_success'): + task_on_success = task.on_success + if task_has_custom(task, 'after_return'): + task_after_return = task.after_return + + store_result = backend.store_result + backend_cleanup = backend.process_cleanup + + pid = os.getpid() + + request_stack = task.request_stack + push_request = request_stack.push + pop_request = request_stack.pop + push_task = _task_stack.push + pop_task = _task_stack.pop + on_chord_part_return = backend.on_chord_part_return + + prerun_receivers = signals.task_prerun.receivers + postrun_receivers = signals.task_postrun.receivers + success_receivers = signals.task_success.receivers + + from celery import canvas + signature = canvas.maybe_signature # maybe_ does not clone if already + + def on_error(request, exc, uuid, state=FAILURE, call_errbacks=True): + if propagate: + raise + I = Info(state, exc) + R = I.handle_error_state(task, eager=eager) + if call_errbacks: + group( + [signature(errback, app=app) + for errback in request.errbacks or []], app=app, + ).apply_async((uuid, )) + return I, R, I.state, I.retval + + def trace_task(uuid, args, kwargs, request=None): + # R - is the possibly prepared return value. + # I - is the Info object. + # retval - is the always unmodified return value. + # state - is the resulting task state. + + # This function is very long because we have unrolled all the calls + # for performance reasons, and because the function is so long + # we want the main variables (I, and R) to stand out visually from the + # the rest of the variables, so breaking PEP8 is worth it ;) + R = I = retval = state = None + kwargs = kwdict(kwargs) + try: + push_task(task) + task_request = Context(request or {}, args=args, + called_directly=False, kwargs=kwargs) + push_request(task_request) + try: + # -*- PRE -*- + if prerun_receivers: + send_prerun(sender=task, task_id=uuid, task=task, + args=args, kwargs=kwargs) + loader_task_init(uuid, task) + if track_started: + store_result( + uuid, {'pid': pid, 'hostname': hostname}, STARTED, + request=task_request, + ) + + # -*- TRACE -*- + try: + R = retval = fun(*args, **kwargs) + state = SUCCESS + except Reject as exc: + I, R = Info(REJECTED, exc), ExceptionInfo(internal=True) + state, retval = I.state, I.retval + except Ignore as exc: + I, R = Info(IGNORED, exc), ExceptionInfo(internal=True) + state, retval = I.state, I.retval + except Retry as exc: + I, R, state, retval = on_error( + task_request, exc, uuid, RETRY, call_errbacks=False, + ) + except Exception as exc: + I, R, state, retval = on_error(task_request, exc, uuid) + except BaseException as exc: + raise + else: + try: + # callback tasks must be applied before the result is + # stored, so that result.children is populated. 
+ + # groups are called inline and will store trail + # separately, so need to call them separately + # so that the trail's not added multiple times :( + # (Issue #1936) + callbacks = task.request.callbacks + if callbacks: + if len(task.request.callbacks) > 1: + sigs, groups = [], [] + for sig in callbacks: + sig = signature(sig, app=app) + if isinstance(sig, group): + groups.append(sig) + else: + sigs.append(sig) + for group_ in groups: + group_.apply_async((retval, )) + if sigs: + group(sigs).apply_async((retval, )) + else: + signature(callbacks[0], app=app).delay(retval) + if publish_result: + store_result( + uuid, retval, SUCCESS, request=task_request, + ) + except EncodeError as exc: + I, R, state, retval = on_error(task_request, exc, uuid) + else: + if task_on_success: + task_on_success(retval, uuid, args, kwargs) + if success_receivers: + send_success(sender=task, result=retval) + + # -* POST *- + if state not in IGNORE_STATES: + if task_request.chord: + on_chord_part_return(task, state, R) + if task_after_return: + task_after_return( + state, retval, uuid, args, kwargs, None, + ) + finally: + try: + if postrun_receivers: + send_postrun(sender=task, task_id=uuid, task=task, + args=args, kwargs=kwargs, + retval=retval, state=state) + finally: + pop_task() + pop_request() + if not eager: + try: + backend_cleanup() + loader_cleanup() + except (KeyboardInterrupt, SystemExit, MemoryError): + raise + except Exception as exc: + _logger.error('Process cleanup failed: %r', exc, + exc_info=True) + except MemoryError: + raise + except Exception as exc: + if eager: + raise + R = report_internal_error(task, exc) + return R, I + + return trace_task + + +def trace_task(task, uuid, args, kwargs, request={}, **opts): + try: + if task.__trace__ is None: + task.__trace__ = build_tracer(task.name, task, **opts) + return task.__trace__(uuid, args, kwargs, request)[0] + except Exception as exc: + return report_internal_error(task, exc) + + +def _trace_task_ret(name, uuid, args, kwargs, request={}, app=None, **opts): + app = app or current_app + return trace_task(app.tasks[name], + uuid, args, kwargs, request, app=app, **opts) +trace_task_ret = _trace_task_ret + + +def _fast_trace_task(task, uuid, args, kwargs, request={}): + # setup_worker_optimizations will point trace_task_ret to here, + # so this is the function used in the worker. + return _tasks[task].__trace__(uuid, args, kwargs, request)[0] + + +def eager_trace_task(task, uuid, args, kwargs, request=None, **opts): + opts.setdefault('eager', True) + return build_tracer(task.name, task, **opts)( + uuid, args, kwargs, request) + + +def report_internal_error(task, exc): + _type, _value, _tb = sys.exc_info() + try: + _value = task.backend.prepare_exception(exc, 'pickle') + exc_info = ExceptionInfo((_type, _value, _tb), internal=True) + warn(RuntimeWarning( + 'Exception raised outside body: {0!r}:\n{1}'.format( + exc, exc_info.traceback))) + return exc_info + finally: + del(_tb) + + +def setup_worker_optimizations(app): + global _tasks + global trace_task_ret + + # make sure custom Task.__call__ methods that calls super + # will not mess up the request/task stack. + _install_stack_protection() + + # all new threads start without a current app, so if an app is not + # passed on to the thread it will fall back to the "default app", + # which then could be the wrong app. So for the worker + # we set this to always return our app. This is a hack, + # and means that only a single app can be used for workers + # running in the same process. 
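``eager_trace_task()`` above is the path taken when a task is executed locally instead of through a worker; a typical test configuration that exercises it, assuming the ``add`` task from the first sketch (the setting names are real, the values illustrative)::

    app.conf.update(
        CELERY_ALWAYS_EAGER=True,
        CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
    )
    assert add.delay(2, 2).get() == 4   # runs inline, no broker or worker needed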
+ app.set_current() + set_default_app(app) + + # evaluate all task classes by finalizing the app. + app.finalize() + + # set fast shortcut to task registry + _tasks = app._tasks + + trace_task_ret = _fast_trace_task + from celery.worker import job as job_module + job_module.trace_task_ret = _fast_trace_task + job_module.__optimize__() + + +def reset_worker_optimizations(): + global trace_task_ret + trace_task_ret = _trace_task_ret + try: + delattr(BaseTask, '_stackprotected') + except AttributeError: + pass + try: + BaseTask.__call__ = _patched.pop('BaseTask.__call__') + except KeyError: + pass + from celery.worker import job as job_module + job_module.trace_task_ret = _trace_task_ret + + +def _install_stack_protection(): + # Patches BaseTask.__call__ in the worker to handle the edge case + # where people override it and also call super. + # + # - The worker optimizes away BaseTask.__call__ and instead + # calls task.run directly. + # - so with the addition of current_task and the request stack + # BaseTask.__call__ now pushes to those stacks so that + # they work when tasks are called directly. + # + # The worker only optimizes away __call__ in the case + # where it has not been overridden, so the request/task stack + # will blow if a custom task class defines __call__ and also + # calls super(). + if not getattr(BaseTask, '_stackprotected', False): + _patched['BaseTask.__call__'] = orig = BaseTask.__call__ + + def __protected_call__(self, *args, **kwargs): + stack = self.request_stack + req = stack.top + if req and not req._protected and \ + len(stack) == 1 and not req.called_directly: + req._protected = 1 + return self.run(*args, **kwargs) + return orig(self, *args, **kwargs) + BaseTask.__call__ = __protected_call__ + BaseTask._stackprotected = True diff --git a/thesisenv/lib/python3.6/site-packages/celery/app/utils.py b/thesisenv/lib/python3.6/site-packages/celery/app/utils.py new file mode 100644 index 0000000..b76290b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/app/utils.py @@ -0,0 +1,266 @@ +# -*- coding: utf-8 -*- +""" + celery.app.utils + ~~~~~~~~~~~~~~~~ + + App utilities: Compat settings, bugreport tool, pickling apps. + +""" +from __future__ import absolute_import + +import os +import platform as _platform +import re + +from collections import Mapping +from types import ModuleType + +from kombu.utils.url import maybe_sanitize_url + +from celery.datastructures import ConfigurationView +from celery.five import items, string_t, values +from celery.platforms import pyimplementation +from celery.utils.text import pretty +from celery.utils.imports import import_from_cwd, symbol_by_name, qualname + +from .defaults import find + +__all__ = ['Settings', 'appstr', 'bugreport', + 'filter_hidden_settings', 'find_app'] + +#: Format used to generate bugreport information. +BUGREPORT_INFO = """ +software -> celery:{celery_v} kombu:{kombu_v} py:{py_v} + billiard:{billiard_v} {driver_v} +platform -> system:{system} arch:{arch} imp:{py_i} +loader -> {loader} +settings -> transport:{transport} results:{results} + +{human_settings} +""" + +HIDDEN_SETTINGS = re.compile( + 'API|TOKEN|KEY|SECRET|PASS|PROFANITIES_LIST|SIGNATURE|DATABASE', + re.IGNORECASE, +) + + +def appstr(app): + """String used in __repr__ etc, to id app instances.""" + return '{0}:0x{1:x}'.format(app.main or '__main__', id(app)) + + +class Settings(ConfigurationView): + """Celery settings object. + + .. seealso: + + :ref:`configuration` for a full list of configuration keys. 
+ + """ + + @property + def CELERY_RESULT_BACKEND(self): + return self.first('CELERY_RESULT_BACKEND', 'CELERY_BACKEND') + + @property + def BROKER_TRANSPORT(self): + return self.first('BROKER_TRANSPORT', + 'BROKER_BACKEND', 'CARROT_BACKEND') + + @property + def BROKER_BACKEND(self): + """Deprecated compat alias to :attr:`BROKER_TRANSPORT`.""" + return self.BROKER_TRANSPORT + + @property + def BROKER_URL(self): + return (os.environ.get('CELERY_BROKER_URL') or + self.first('BROKER_URL', 'BROKER_HOST')) + + @property + def CELERY_TIMEZONE(self): + # this way we also support django's time zone. + return self.first('CELERY_TIMEZONE', 'TIME_ZONE') + + def without_defaults(self): + """Return the current configuration, but without defaults.""" + # the last stash is the default settings, so just skip that + return Settings({}, self._order[:-1]) + + def value_set_for(self, key): + return key in self.without_defaults() + + def find_option(self, name, namespace='celery'): + """Search for option by name. + + Will return ``(namespace, key, type)`` tuple, e.g.:: + + >>> from proj.celery import app + >>> app.conf.find_option('disable_rate_limits') + ('CELERY', 'DISABLE_RATE_LIMITS', + bool default->False>)) + + :param name: Name of option, cannot be partial. + :keyword namespace: Preferred namespace (``CELERY`` by default). + + """ + return find(name, namespace) + + def find_value_for_key(self, name, namespace='celery'): + """Shortcut to ``get_by_parts(*find_option(name)[:-1])``""" + return self.get_by_parts(*self.find_option(name, namespace)[:-1]) + + def get_by_parts(self, *parts): + """Return the current value for setting specified as a path. + + Example:: + + >>> from proj.celery import app + >>> app.conf.get_by_parts('CELERY', 'DISABLE_RATE_LIMITS') + False + + """ + return self['_'.join(part for part in parts if part)] + + def table(self, with_defaults=False, censored=True): + filt = filter_hidden_settings if censored else lambda v: v + return filt(dict( + (k, v) for k, v in items( + self if with_defaults else self.without_defaults()) + if k.isupper() and not k.startswith('_') + )) + + def humanize(self, with_defaults=False, censored=True): + """Return a human readable string showing changes to the + configuration.""" + return '\n'.join( + '{0}: {1}'.format(key, pretty(value, width=50)) + for key, value in items(self.table(with_defaults, censored))) + + +class AppPickler(object): + """Old application pickler/unpickler (< 3.1).""" + + def __call__(self, cls, *args): + kwargs = self.build_kwargs(*args) + app = self.construct(cls, **kwargs) + self.prepare(app, **kwargs) + return app + + def prepare(self, app, **kwargs): + app.conf.update(kwargs['changes']) + + def build_kwargs(self, *args): + return self.build_standard_kwargs(*args) + + def build_standard_kwargs(self, main, changes, loader, backend, amqp, + events, log, control, accept_magic_kwargs, + config_source=None): + return dict(main=main, loader=loader, backend=backend, amqp=amqp, + changes=changes, events=events, log=log, control=control, + set_as_current=False, + accept_magic_kwargs=accept_magic_kwargs, + config_source=config_source) + + def construct(self, cls, **kwargs): + return cls(**kwargs) + + +def _unpickle_app(cls, pickler, *args): + """Rebuild app for versions 2.5+""" + return pickler()(cls, *args) + + +def _unpickle_app_v2(cls, kwargs): + """Rebuild app for versions 3.1+""" + kwargs['set_as_current'] = False + return cls(**kwargs) + + +def filter_hidden_settings(conf): + + def maybe_censor(key, value, mask='*' * 8): + if 
isinstance(value, Mapping): + return filter_hidden_settings(value) + if isinstance(key, string_t): + if HIDDEN_SETTINGS.search(key): + return mask + elif 'BROKER_URL' in key.upper(): + from kombu import Connection + return Connection(value).as_uri(mask=mask) + elif key.upper() in ('CELERY_RESULT_BACKEND', 'CELERY_BACKEND'): + return maybe_sanitize_url(value, mask=mask) + + return value + + return dict((k, maybe_censor(k, v)) for k, v in items(conf)) + + +def bugreport(app): + """Return a string containing information useful in bug reports.""" + import billiard + import celery + import kombu + + try: + conn = app.connection() + driver_v = '{0}:{1}'.format(conn.transport.driver_name, + conn.transport.driver_version()) + transport = conn.transport_cls + except Exception: + transport = driver_v = '' + + return BUGREPORT_INFO.format( + system=_platform.system(), + arch=', '.join(x for x in _platform.architecture() if x), + py_i=pyimplementation(), + celery_v=celery.VERSION_BANNER, + kombu_v=kombu.__version__, + billiard_v=billiard.__version__, + py_v=_platform.python_version(), + driver_v=driver_v, + transport=transport, + results=maybe_sanitize_url( + app.conf.CELERY_RESULT_BACKEND or 'disabled'), + human_settings=app.conf.humanize(), + loader=qualname(app.loader.__class__), + ) + + +def find_app(app, symbol_by_name=symbol_by_name, imp=import_from_cwd): + from .base import Celery + + try: + sym = symbol_by_name(app, imp=imp) + except AttributeError: + # last part was not an attribute, but a module + sym = imp(app) + if isinstance(sym, ModuleType) and ':' not in app: + try: + found = sym.app + if isinstance(found, ModuleType): + raise AttributeError() + except AttributeError: + try: + found = sym.celery + if isinstance(found, ModuleType): + raise AttributeError() + except AttributeError: + if getattr(sym, '__path__', None): + try: + return find_app( + '{0}.celery'.format(app), + symbol_by_name=symbol_by_name, imp=imp, + ) + except ImportError: + pass + for suspect in values(vars(sym)): + if isinstance(suspect, Celery): + return suspect + raise + else: + return found + else: + return found + return sym diff --git a/thesisenv/lib/python3.6/site-packages/celery/apps/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/apps/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/celery/apps/beat.py b/thesisenv/lib/python3.6/site-packages/celery/apps/beat.py new file mode 100644 index 0000000..46cef9b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/apps/beat.py @@ -0,0 +1,151 @@ +# -*- coding: utf-8 -*- +""" + celery.apps.beat + ~~~~~~~~~~~~~~~~ + + This module is the 'program-version' of :mod:`celery.beat`. + + It does everything necessary to run that module + as an actual application, like installing signal handlers + and so on. + +""" +from __future__ import absolute_import, unicode_literals + +import numbers +import socket +import sys + +from celery import VERSION_BANNER, platforms, beat +from celery.utils.imports import qualname +from celery.utils.log import LOG_LEVELS, get_logger +from celery.utils.timeutils import humanize_seconds + +__all__ = ['Beat'] + +STARTUP_INFO_FMT = """ +Configuration -> + . broker -> {conninfo} + . loader -> {loader} + . scheduler -> {scheduler} +{scheduler_info} + . logfile -> {logfile}@%{loglevel} + . 
maxinterval -> {hmax_interval} ({max_interval}s) +""".strip() + +logger = get_logger('celery.beat') + + +class Beat(object): + Service = beat.Service + app = None + + def __init__(self, max_interval=None, app=None, + socket_timeout=30, pidfile=None, no_color=None, + loglevel=None, logfile=None, schedule=None, + scheduler_cls=None, redirect_stdouts=None, + redirect_stdouts_level=None, **kwargs): + """Starts the beat task scheduler.""" + self.app = app = app or self.app + self.loglevel = self._getopt('log_level', loglevel) + self.logfile = self._getopt('log_file', logfile) + self.schedule = self._getopt('schedule_filename', schedule) + self.scheduler_cls = self._getopt('scheduler', scheduler_cls) + self.redirect_stdouts = self._getopt( + 'redirect_stdouts', redirect_stdouts, + ) + self.redirect_stdouts_level = self._getopt( + 'redirect_stdouts_level', redirect_stdouts_level, + ) + + self.max_interval = max_interval + self.socket_timeout = socket_timeout + self.no_color = no_color + self.colored = app.log.colored( + self.logfile, + enabled=not no_color if no_color is not None else no_color, + ) + self.pidfile = pidfile + + if not isinstance(self.loglevel, numbers.Integral): + self.loglevel = LOG_LEVELS[self.loglevel.upper()] + + def _getopt(self, key, value): + if value is not None: + return value + return self.app.conf.find_value_for_key(key, namespace='celerybeat') + + def run(self): + print(str(self.colored.cyan( + 'celery beat v{0} is starting.'.format(VERSION_BANNER)))) + self.init_loader() + self.set_process_title() + self.start_scheduler() + + def setup_logging(self, colorize=None): + if colorize is None and self.no_color is not None: + colorize = not self.no_color + self.app.log.setup(self.loglevel, self.logfile, + self.redirect_stdouts, self.redirect_stdouts_level, + colorize=colorize) + + def start_scheduler(self): + c = self.colored + if self.pidfile: + platforms.create_pidlock(self.pidfile) + beat = self.Service(app=self.app, + max_interval=self.max_interval, + scheduler_cls=self.scheduler_cls, + schedule_filename=self.schedule) + + print(str(c.blue('__ ', c.magenta('-'), + c.blue(' ... __ '), c.magenta('-'), + c.blue(' _\n'), + c.reset(self.startup_info(beat))))) + self.setup_logging() + if self.socket_timeout: + logger.debug('Setting default socket timeout to %r', + self.socket_timeout) + socket.setdefaulttimeout(self.socket_timeout) + try: + self.install_sync_handler(beat) + beat.start() + except Exception as exc: + logger.critical('beat raised exception %s: %r', + exc.__class__, exc, + exc_info=True) + + def init_loader(self): + # Run the worker init handler. + # (Usually imports task modules and such.) 
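The scheduler this program drives is normally fed from the ``CELERYBEAT_SCHEDULE`` setting; a sketch of such a schedule (task names and timings invented, app assumed as in the earlier sketches), after which the service here is started with ``celery -A proj beat``::

    from datetime import timedelta

    from celery.schedules import crontab
    from proj.celery import app   # hypothetical project layout

    app.conf.CELERYBEAT_SCHEDULE = {
        'add-every-30-seconds': {
            'task': 'proj.tasks.add',
            'schedule': timedelta(seconds=30),
            'args': (16, 16),
        },
        'nightly-report': {
            'task': 'proj.tasks.report',
            'schedule': crontab(hour=3, minute=0),
        },
    }
    app.conf.CELERY_TIMEZONE = 'UTC'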
+ self.app.loader.init_worker() + self.app.finalize() + + def startup_info(self, beat): + scheduler = beat.get_scheduler(lazy=True) + return STARTUP_INFO_FMT.format( + conninfo=self.app.connection().as_uri(), + logfile=self.logfile or '[stderr]', + loglevel=LOG_LEVELS[self.loglevel], + loader=qualname(self.app.loader), + scheduler=qualname(scheduler), + scheduler_info=scheduler.info, + hmax_interval=humanize_seconds(beat.max_interval), + max_interval=beat.max_interval, + ) + + def set_process_title(self): + arg_start = 'manage' in sys.argv[0] and 2 or 1 + platforms.set_process_title( + 'celery beat', info=' '.join(sys.argv[arg_start:]), + ) + + def install_sync_handler(self, beat): + """Install a `SIGTERM` + `SIGINT` handler that saves + the beat schedule.""" + + def _sync(signum, frame): + beat.sync() + raise SystemExit() + + platforms.signals.update(SIGTERM=_sync, SIGINT=_sync) diff --git a/thesisenv/lib/python3.6/site-packages/celery/apps/worker.py b/thesisenv/lib/python3.6/site-packages/celery/apps/worker.py new file mode 100644 index 0000000..637a082 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/apps/worker.py @@ -0,0 +1,372 @@ +# -*- coding: utf-8 -*- +""" + celery.apps.worker + ~~~~~~~~~~~~~~~~~~ + + This module is the 'program-version' of :mod:`celery.worker`. + + It does everything necessary to run that module + as an actual application, like installing signal handlers, + platform tweaks, and so on. + +""" +from __future__ import absolute_import, print_function, unicode_literals + +import logging +import os +import platform as _platform +import sys +import warnings + +from functools import partial + +from billiard import current_process +from kombu.utils.encoding import safe_str + +from celery import VERSION_BANNER, platforms, signals +from celery.app import trace +from celery.exceptions import ( + CDeprecationWarning, WorkerShutdown, WorkerTerminate, +) +from celery.five import string, string_t +from celery.loaders.app import AppLoader +from celery.platforms import check_privileges +from celery.utils import cry, isatty +from celery.utils.imports import qualname +from celery.utils.log import get_logger, in_sighandler, set_in_sighandler +from celery.utils.text import pluralize +from celery.worker import WorkController + +__all__ = ['Worker'] + +logger = get_logger(__name__) +is_jython = sys.platform.startswith('java') +is_pypy = hasattr(sys, 'pypy_version_info') + +W_PICKLE_DEPRECATED = """ +Starting from version 3.2 Celery will refuse to accept pickle by default. + +The pickle serializer is a security concern as it may give attackers +the ability to execute any command. It's important to secure +your broker from unauthorized access when using pickle, so we think +that enabling pickle should require a deliberate action and not be +the default choice. + +If you depend on pickle then you should set a setting to disable this +warning and to be sure that everything will continue working +when you upgrade to Celery 3.2:: + + CELERY_ACCEPT_CONTENT = ['pickle', 'json', 'msgpack', 'yaml'] + +You must only enable the serializers that you will actually use. 
+ +""" + + +def active_thread_count(): + from threading import enumerate + return sum(1 for t in enumerate() + if not t.name.startswith('Dummy-')) + + +def safe_say(msg): + print('\n{0}'.format(msg), file=sys.__stderr__) + +ARTLINES = [ + ' --------------', + '---- **** -----', + '--- * *** * --', + '-- * - **** ---', + '- ** ----------', + '- ** ----------', + '- ** ----------', + '- ** ----------', + '- *** --- * ---', + '-- ******* ----', + '--- ***** -----', + ' --------------', +] + +BANNER = """\ +{hostname} v{version} + +{platform} + +[config] +.> app: {app} +.> transport: {conninfo} +.> results: {results} +.> concurrency: {concurrency} + +[queues] +{queues} +""" + +EXTRA_INFO_FMT = """ +[tasks] +{tasks} +""" + + +class Worker(WorkController): + + def on_before_init(self, **kwargs): + trace.setup_worker_optimizations(self.app) + + # this signal can be used to set up configuration for + # workers by name. + signals.celeryd_init.send( + sender=self.hostname, instance=self, + conf=self.app.conf, options=kwargs, + ) + check_privileges(self.app.conf.CELERY_ACCEPT_CONTENT) + + def on_after_init(self, purge=False, no_color=None, + redirect_stdouts=None, redirect_stdouts_level=None, + **kwargs): + self.redirect_stdouts = self._getopt( + 'redirect_stdouts', redirect_stdouts, + ) + self.redirect_stdouts_level = self._getopt( + 'redirect_stdouts_level', redirect_stdouts_level, + ) + super(Worker, self).setup_defaults(**kwargs) + self.purge = purge + self.no_color = no_color + self._isatty = isatty(sys.stdout) + self.colored = self.app.log.colored( + self.logfile, + enabled=not no_color if no_color is not None else no_color + ) + + def on_init_blueprint(self): + self._custom_logging = self.setup_logging() + # apply task execution optimizations + # -- This will finalize the app! + trace.setup_worker_optimizations(self.app) + + def on_start(self): + if not self._custom_logging and self.redirect_stdouts: + self.app.log.redirect_stdouts(self.redirect_stdouts_level) + + WorkController.on_start(self) + + # this signal can be used to e.g. change queues after + # the -Q option has been applied. + signals.celeryd_after_setup.send( + sender=self.hostname, instance=self, conf=self.app.conf, + ) + + if not self.app.conf.value_set_for('CELERY_ACCEPT_CONTENT'): + warnings.warn(CDeprecationWarning(W_PICKLE_DEPRECATED)) + + if self.purge: + self.purge_messages() + + # Dump configuration to screen so we have some basic information + # for when users sends bug reports. + print(safe_str(''.join([ + string(self.colored.cyan(' \n', self.startup_info())), + string(self.colored.reset(self.extra_info() or '')), + ])), file=sys.__stdout__) + self.set_process_status('-active-') + self.install_platform_tweaks(self) + + def on_consumer_ready(self, consumer): + signals.worker_ready.send(sender=consumer) + print('{0} ready.'.format(safe_str(self.hostname), )) + + def setup_logging(self, colorize=None): + if colorize is None and self.no_color is not None: + colorize = not self.no_color + return self.app.log.setup( + self.loglevel, self.logfile, + redirect_stdouts=False, colorize=colorize, hostname=self.hostname, + ) + + def purge_messages(self): + count = self.app.control.purge() + if count: + print('purge: Erased {0} {1} from the queue.\n'.format( + count, pluralize(count, 'message'))) + + def tasklist(self, include_builtins=True, sep='\n', int_='celery.'): + return sep.join( + ' . 
{0}'.format(task) for task in sorted(self.app.tasks) + if (not task.startswith(int_) if not include_builtins else task) + ) + + def extra_info(self): + if self.loglevel <= logging.INFO: + include_builtins = self.loglevel <= logging.DEBUG + tasklist = self.tasklist(include_builtins=include_builtins) + return EXTRA_INFO_FMT.format(tasks=tasklist) + + def startup_info(self): + app = self.app + concurrency = string(self.concurrency) + appr = '{0}:0x{1:x}'.format(app.main or '__main__', id(app)) + if not isinstance(app.loader, AppLoader): + loader = qualname(app.loader) + if loader.startswith('celery.loaders'): + loader = loader[14:] + appr += ' ({0})'.format(loader) + if self.autoscale: + max, min = self.autoscale + concurrency = '{{min={0}, max={1}}}'.format(min, max) + pool = self.pool_cls + if not isinstance(pool, string_t): + pool = pool.__module__ + concurrency += ' ({0})'.format(pool.split('.')[-1]) + events = 'ON' + if not self.send_events: + events = 'OFF (enable -E to monitor this worker)' + + banner = BANNER.format( + app=appr, + hostname=safe_str(self.hostname), + version=VERSION_BANNER, + conninfo=self.app.connection().as_uri(), + results=self.app.backend.as_uri(), + concurrency=concurrency, + platform=safe_str(_platform.platform()), + events=events, + queues=app.amqp.queues.format(indent=0, indent_first=False), + ).splitlines() + + # integrate the ASCII art. + for i, x in enumerate(banner): + try: + banner[i] = ' '.join([ARTLINES[i], banner[i]]) + except IndexError: + banner[i] = ' ' * 16 + banner[i] + return '\n'.join(banner) + '\n' + + def install_platform_tweaks(self, worker): + """Install platform specific tweaks and workarounds.""" + if self.app.IS_OSX: + self.osx_proxy_detection_workaround() + + # Install signal handler so SIGHUP restarts the worker. + if not self._isatty: + # only install HUP handler if detached from terminal, + # so closing the terminal window doesn't restart the worker + # into the background. + if self.app.IS_OSX: + # OS X can't exec from a process using threads. 
+ # See http://github.com/celery/celery/issues#issue/152 + install_HUP_not_supported_handler(worker) + else: + install_worker_restart_handler(worker) + install_worker_term_handler(worker) + install_worker_term_hard_handler(worker) + install_worker_int_handler(worker) + install_cry_handler() + install_rdb_handler() + + def osx_proxy_detection_workaround(self): + """See http://github.com/celery/celery/issues#issue/161""" + os.environ.setdefault('celery_dummy_proxy', 'set_by_celeryd') + + def set_process_status(self, info): + return platforms.set_mp_process_title( + 'celeryd', + info='{0} ({1})'.format(info, platforms.strargv(sys.argv)), + hostname=self.hostname, + ) + + +def _shutdown_handler(worker, sig='TERM', how='Warm', + exc=WorkerShutdown, callback=None): + + def _handle_request(*args): + with in_sighandler(): + from celery.worker import state + if current_process()._name == 'MainProcess': + if callback: + callback(worker) + safe_say('worker: {0} shutdown (MainProcess)'.format(how)) + if active_thread_count() > 1: + setattr(state, {'Warm': 'should_stop', + 'Cold': 'should_terminate'}[how], True) + else: + raise exc() + _handle_request.__name__ = str('worker_{0}'.format(how)) + platforms.signals[sig] = _handle_request +install_worker_term_handler = partial( + _shutdown_handler, sig='SIGTERM', how='Warm', exc=WorkerShutdown, +) +if not is_jython: # pragma: no cover + install_worker_term_hard_handler = partial( + _shutdown_handler, sig='SIGQUIT', how='Cold', exc=WorkerTerminate, + ) +else: # pragma: no cover + install_worker_term_handler = \ + install_worker_term_hard_handler = lambda *a, **kw: None + + +def on_SIGINT(worker): + safe_say('worker: Hitting Ctrl+C again will terminate all running tasks!') + install_worker_term_hard_handler(worker, sig='SIGINT') +if not is_jython: # pragma: no cover + install_worker_int_handler = partial( + _shutdown_handler, sig='SIGINT', callback=on_SIGINT + ) +else: # pragma: no cover + def install_worker_int_handler(*a, **kw): + pass + + +def _reload_current_worker(): + platforms.close_open_fds([ + sys.__stdin__, sys.__stdout__, sys.__stderr__, + ]) + os.execv(sys.executable, [sys.executable] + sys.argv) + + +def install_worker_restart_handler(worker, sig='SIGHUP'): + + def restart_worker_sig_handler(*args): + """Signal handler restarting the current python program.""" + set_in_sighandler(True) + safe_say('Restarting celery worker ({0})'.format(' '.join(sys.argv))) + import atexit + atexit.register(_reload_current_worker) + from celery.worker import state + state.should_stop = True + platforms.signals[sig] = restart_worker_sig_handler + + +def install_cry_handler(sig='SIGUSR1'): + # Jython/PyPy does not have sys._current_frames + if is_jython or is_pypy: # pragma: no cover + return + + def cry_handler(*args): + """Signal handler logging the stacktrace of all active threads.""" + with in_sighandler(): + safe_say(cry()) + platforms.signals[sig] = cry_handler + + +def install_rdb_handler(envvar='CELERY_RDBSIG', + sig='SIGUSR2'): # pragma: no cover + + def rdb_handler(*args): + """Signal handler setting a rdb breakpoint at the current frame.""" + with in_sighandler(): + from celery.contrib.rdb import set_trace, _frame + # gevent does not pass standard signal handler args + frame = args[1] if args else _frame().f_back + set_trace(frame) + if os.environ.get(envvar): + platforms.signals[sig] = rdb_handler + + +def install_HUP_not_supported_handler(worker, sig='SIGHUP'): + + def warn_on_HUP_handler(signum, frame): + with in_sighandler(): + safe_say('{sig} not 
supported: Restarting with {sig} is ' + 'unstable on this platform!'.format(sig=sig)) + platforms.signals[sig] = warn_on_HUP_handler diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/backends/__init__.py new file mode 100644 index 0000000..44ee3b7 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/backends/__init__.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +""" + celery.backends + ~~~~~~~~~~~~~~~ + + Backend abstract factory (...did I just say that?) and alias definitions. + +""" +from __future__ import absolute_import + +import sys +import types + +from celery.exceptions import ImproperlyConfigured +from celery.local import Proxy +from celery._state import current_app +from celery.five import reraise +from celery.utils.imports import symbol_by_name + +__all__ = ['get_backend_cls', 'get_backend_by_url'] + +UNKNOWN_BACKEND = """\ +Unknown result backend: {0!r}. Did you spell that correctly? ({1!r})\ +""" + +BACKEND_ALIASES = { + 'amqp': 'celery.backends.amqp:AMQPBackend', + 'rpc': 'celery.backends.rpc.RPCBackend', + 'cache': 'celery.backends.cache:CacheBackend', + 'redis': 'celery.backends.redis:RedisBackend', + 'mongodb': 'celery.backends.mongodb:MongoBackend', + 'db': 'celery.backends.database:DatabaseBackend', + 'database': 'celery.backends.database:DatabaseBackend', + 'cassandra': 'celery.backends.cassandra:CassandraBackend', + 'couchbase': 'celery.backends.couchbase:CouchBaseBackend', + 'disabled': 'celery.backends.base:DisabledBackend', +} + +#: deprecated alias to ``current_app.backend``. +default_backend = Proxy(lambda: current_app.backend) + + +def get_backend_cls(backend=None, loader=None): + """Get backend class by name/alias""" + backend = backend or 'disabled' + loader = loader or current_app.loader + aliases = dict(BACKEND_ALIASES, **loader.override_backends) + try: + cls = symbol_by_name(backend, aliases) + except ValueError as exc: + reraise(ImproperlyConfigured, ImproperlyConfigured( + UNKNOWN_BACKEND.format(backend, exc)), sys.exc_info()[2]) + if isinstance(cls, types.ModuleType): + raise ImproperlyConfigured(UNKNOWN_BACKEND.format( + backend, 'is a Python module, not a backend class.')) + return cls + + +def get_backend_by_url(backend=None, loader=None): + url = None + if backend and '://' in backend: + url = backend + scheme, _, _ = url.partition('://') + if '+' in scheme: + backend, url = url.split('+', 1) + else: + backend = scheme + return get_backend_cls(backend, loader), url diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/amqp.py b/thesisenv/lib/python3.6/site-packages/celery/backends/amqp.py new file mode 100644 index 0000000..6e7f778 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/backends/amqp.py @@ -0,0 +1,317 @@ +# -*- coding: utf-8 -*- +""" + celery.backends.amqp + ~~~~~~~~~~~~~~~~~~~~ + + The AMQP result backend. + + This backend publishes results as messages. 
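The get_backend_cls()/get_backend_by_url() helpers defined in celery/backends/__init__.py above resolve a backend either from a plain alias in BACKEND_ALIASES or from a URL whose scheme names the backend; a minimal sketch (the Redis URL is purely illustrative):

from celery.backends import get_backend_cls, get_backend_by_url

AMQPBackend = get_backend_cls('amqp')                        # alias lookup
cls, url = get_backend_by_url('redis://localhost:6379/1')    # the scheme picks the class,
                                                             # the full URL is passed through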
+ +""" +from __future__ import absolute_import + +import socket + +from collections import deque +from operator import itemgetter + +from kombu import Exchange, Queue, Producer, Consumer + +from celery import states +from celery.exceptions import TimeoutError +from celery.five import range, monotonic +from celery.utils.functional import dictfilter +from celery.utils.log import get_logger +from celery.utils.timeutils import maybe_s_to_ms + +from .base import BaseBackend + +__all__ = ['BacklogLimitExceeded', 'AMQPBackend'] + +logger = get_logger(__name__) + + +class BacklogLimitExceeded(Exception): + """Too much state history to fast-forward.""" + + +def repair_uuid(s): + # Historically the dashes in UUIDS are removed from AMQ entity names, + # but there is no known reason to. Hopefully we'll be able to fix + # this in v4.0. + return '%s-%s-%s-%s-%s' % (s[:8], s[8:12], s[12:16], s[16:20], s[20:]) + + +class NoCacheQueue(Queue): + can_cache_declaration = False + + +class AMQPBackend(BaseBackend): + """Publishes results by sending messages.""" + Exchange = Exchange + Queue = NoCacheQueue + Consumer = Consumer + Producer = Producer + + BacklogLimitExceeded = BacklogLimitExceeded + + persistent = True + supports_autoexpire = True + supports_native_join = True + + retry_policy = { + 'max_retries': 20, + 'interval_start': 0, + 'interval_step': 1, + 'interval_max': 1, + } + + def __init__(self, app, connection=None, exchange=None, exchange_type=None, + persistent=None, serializer=None, auto_delete=True, **kwargs): + super(AMQPBackend, self).__init__(app, **kwargs) + conf = self.app.conf + self._connection = connection + self.persistent = self.prepare_persistent(persistent) + self.delivery_mode = 2 if self.persistent else 1 + exchange = exchange or conf.CELERY_RESULT_EXCHANGE + exchange_type = exchange_type or conf.CELERY_RESULT_EXCHANGE_TYPE + self.exchange = self._create_exchange( + exchange, exchange_type, self.delivery_mode, + ) + self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER + self.auto_delete = auto_delete + + self.expires = None + if 'expires' not in kwargs or kwargs['expires'] is not None: + self.expires = self.prepare_expires(kwargs.get('expires')) + self.queue_arguments = dictfilter({ + 'x-expires': maybe_s_to_ms(self.expires), + }) + + def _create_exchange(self, name, type='direct', delivery_mode=2): + return self.Exchange(name=name, + type=type, + delivery_mode=delivery_mode, + durable=self.persistent, + auto_delete=False) + + def _create_binding(self, task_id): + name = self.rkey(task_id) + return self.Queue(name=name, + exchange=self.exchange, + routing_key=name, + durable=self.persistent, + auto_delete=self.auto_delete, + queue_arguments=self.queue_arguments) + + def revive(self, channel): + pass + + def rkey(self, task_id): + return task_id.replace('-', '') + + def destination_for(self, task_id, request): + if request: + return self.rkey(task_id), request.correlation_id or task_id + return self.rkey(task_id), task_id + + def store_result(self, task_id, result, status, + traceback=None, request=None, **kwargs): + """Send task return value and status.""" + routing_key, correlation_id = self.destination_for(task_id, request) + if not routing_key: + return + with self.app.amqp.producer_pool.acquire(block=True) as producer: + producer.publish( + {'task_id': task_id, 'status': status, + 'result': self.encode_result(result, status), + 'traceback': traceback, + 'children': self.current_task_children(request)}, + exchange=self.exchange, + routing_key=routing_key, + 
correlation_id=correlation_id, + serializer=self.serializer, + retry=True, retry_policy=self.retry_policy, + declare=self.on_reply_declare(task_id), + delivery_mode=self.delivery_mode, + ) + return result + + def on_reply_declare(self, task_id): + return [self._create_binding(task_id)] + + def wait_for(self, task_id, timeout=None, cache=True, + no_ack=True, on_interval=None, + READY_STATES=states.READY_STATES, + PROPAGATE_STATES=states.PROPAGATE_STATES, + **kwargs): + cached_meta = self._cache.get(task_id) + if cache and cached_meta and \ + cached_meta['status'] in READY_STATES: + return cached_meta + else: + try: + return self.consume(task_id, timeout=timeout, no_ack=no_ack, + on_interval=on_interval) + except socket.timeout: + raise TimeoutError('The operation timed out.') + + def get_task_meta(self, task_id, backlog_limit=1000): + # Polling and using basic_get + with self.app.pool.acquire_channel(block=True) as (_, channel): + binding = self._create_binding(task_id)(channel) + binding.declare() + + prev = latest = acc = None + for i in range(backlog_limit): # spool ffwd + acc = binding.get( + accept=self.accept, no_ack=False, + ) + if not acc: # no more messages + break + if acc.payload['task_id'] == task_id: + prev, latest = latest, acc + if prev: + # backends are not expected to keep history, + # so we delete everything except the most recent state. + prev.ack() + prev = None + else: + raise self.BacklogLimitExceeded(task_id) + + if latest: + payload = self._cache[task_id] = \ + self.meta_from_decoded(latest.payload) + latest.requeue() + return payload + else: + # no new state, use previous + try: + return self._cache[task_id] + except KeyError: + # result probably pending. + return {'status': states.PENDING, 'result': None} + poll = get_task_meta # XXX compat + + def drain_events(self, connection, consumer, + timeout=None, on_interval=None, now=monotonic, wait=None): + wait = wait or connection.drain_events + results = {} + + def callback(meta, message): + if meta['status'] in states.READY_STATES: + results[meta['task_id']] = self.meta_from_decoded(meta) + + consumer.callbacks[:] = [callback] + time_start = now() + + while 1: + # Total time spent may exceed a single call to wait() + if timeout and now() - time_start >= timeout: + raise socket.timeout() + try: + wait(timeout=1) + except socket.timeout: + pass + if on_interval: + on_interval() + if results: # got event on the wanted channel. 
+ break + self._cache.update(results) + return results + + def consume(self, task_id, timeout=None, no_ack=True, on_interval=None): + wait = self.drain_events + with self.app.pool.acquire_channel(block=True) as (conn, channel): + binding = self._create_binding(task_id) + with self.Consumer(channel, binding, + no_ack=no_ack, accept=self.accept) as consumer: + while 1: + try: + return wait( + conn, consumer, timeout, on_interval)[task_id] + except KeyError: + continue + + def _many_bindings(self, ids): + return [self._create_binding(task_id) for task_id in ids] + + def get_many(self, task_ids, timeout=None, no_ack=True, + now=monotonic, getfields=itemgetter('status', 'task_id'), + READY_STATES=states.READY_STATES, + PROPAGATE_STATES=states.PROPAGATE_STATES, **kwargs): + with self.app.pool.acquire_channel(block=True) as (conn, channel): + ids = set(task_ids) + cached_ids = set() + mark_cached = cached_ids.add + for task_id in ids: + try: + cached = self._cache[task_id] + except KeyError: + pass + else: + if cached['status'] in READY_STATES: + yield task_id, cached + mark_cached(task_id) + ids.difference_update(cached_ids) + results = deque() + push_result = results.append + push_cache = self._cache.__setitem__ + decode_result = self.meta_from_decoded + + def on_message(message): + body = decode_result(message.decode()) + state, uid = getfields(body) + if state in READY_STATES: + push_result(body) \ + if uid in task_ids else push_cache(uid, body) + + bindings = self._many_bindings(task_ids) + with self.Consumer(channel, bindings, on_message=on_message, + accept=self.accept, no_ack=no_ack): + wait = conn.drain_events + popleft = results.popleft + while ids: + wait(timeout=timeout) + while results: + state = popleft() + task_id = state['task_id'] + ids.discard(task_id) + push_cache(task_id, state) + yield task_id, state + + def reload_task_result(self, task_id): + raise NotImplementedError( + 'reload_task_result is not supported by this backend.') + + def reload_group_result(self, task_id): + """Reload group result, even if it has been previously fetched.""" + raise NotImplementedError( + 'reload_group_result is not supported by this backend.') + + def save_group(self, group_id, result): + raise NotImplementedError( + 'save_group is not supported by this backend.') + + def restore_group(self, group_id, cache=True): + raise NotImplementedError( + 'restore_group is not supported by this backend.') + + def delete_group(self, group_id): + raise NotImplementedError( + 'delete_group is not supported by this backend.') + + def as_uri(self, include_password=True): + return 'amqp://' + + def __reduce__(self, args=(), kwargs={}): + kwargs.update( + connection=self._connection, + exchange=self.exchange.name, + exchange_type=self.exchange.type, + persistent=self.persistent, + serializer=self.serializer, + auto_delete=self.auto_delete, + expires=self.expires, + ) + return super(AMQPBackend, self).__reduce__(args, kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/base.py b/thesisenv/lib/python3.6/site-packages/celery/backends/base.py new file mode 100644 index 0000000..03b6909 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/backends/base.py @@ -0,0 +1,623 @@ +# -*- coding: utf-8 -*- +""" + celery.backends.base + ~~~~~~~~~~~~~~~~~~~~ + + Result backend base classes. + + - :class:`BaseBackend` defines the interface. + + - :class:`KeyValueStoreBackend` is a common base class + using K/V semantics like _get and _put. 
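As a rough illustration of the key/value contract described above, a toy backend only has to provide the primitive get/set/mget/delete operations declared further down; encoding, caching and expiry stay in the base classes (a sketch only, not production code):

from celery.backends.base import KeyValueStoreBackend

class DictBackend(KeyValueStoreBackend):
    """Toy in-process result store, for illustration only."""
    _store = {}

    def get(self, key):
        return self._store.get(key)

    def mget(self, keys):
        return [self._store.get(key) for key in keys]

    def set(self, key, value):
        self._store[key] = value

    def delete(self, key):
        self._store.pop(key, None)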
+ +""" +from __future__ import absolute_import + +import time +import sys + +from datetime import timedelta + +from billiard.einfo import ExceptionInfo +from kombu.serialization import ( + dumps, loads, prepare_accept_content, + registry as serializer_registry, +) +from kombu.utils.encoding import bytes_to_str, ensure_bytes, from_utf8 +from kombu.utils.url import maybe_sanitize_url + +from celery import states +from celery import current_app, maybe_signature +from celery.app import current_task +from celery.exceptions import ChordError, TimeoutError, TaskRevokedError +from celery.five import items +from celery.result import ( + GroupResult, ResultBase, allow_join_result, result_from_tuple, +) +from celery.utils import timeutils +from celery.utils.functional import LRUCache +from celery.utils.log import get_logger +from celery.utils.serialization import ( + get_pickled_exception, + get_pickleable_exception, + create_exception_cls, +) + +__all__ = ['BaseBackend', 'KeyValueStoreBackend', 'DisabledBackend'] + +EXCEPTION_ABLE_CODECS = frozenset(['pickle']) +PY3 = sys.version_info >= (3, 0) + +logger = get_logger(__name__) + + +def unpickle_backend(cls, args, kwargs): + """Return an unpickled backend.""" + return cls(*args, app=current_app._get_current_object(), **kwargs) + + +class _nulldict(dict): + + def ignore(self, *a, **kw): + pass + __setitem__ = update = setdefault = ignore + + +class BaseBackend(object): + READY_STATES = states.READY_STATES + UNREADY_STATES = states.UNREADY_STATES + EXCEPTION_STATES = states.EXCEPTION_STATES + + TimeoutError = TimeoutError + + #: Time to sleep between polling each individual item + #: in `ResultSet.iterate`. as opposed to the `interval` + #: argument which is for each pass. + subpolling_interval = None + + #: If true the backend must implement :meth:`get_many`. + supports_native_join = False + + #: If true the backend must automatically expire results. + #: The daily backend_cleanup periodic task will not be triggered + #: in this case. + supports_autoexpire = False + + #: Set to true if the backend is peristent by default. 
+ persistent = True + + retry_policy = { + 'max_retries': 20, + 'interval_start': 0, + 'interval_step': 1, + 'interval_max': 1, + } + + def __init__(self, app, + serializer=None, max_cached_results=None, accept=None, + url=None, **kwargs): + self.app = app + conf = self.app.conf + self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER + (self.content_type, + self.content_encoding, + self.encoder) = serializer_registry._encoders[self.serializer] + cmax = max_cached_results or conf.CELERY_MAX_CACHED_RESULTS + self._cache = _nulldict() if cmax == -1 else LRUCache(limit=cmax) + self.accept = prepare_accept_content( + conf.CELERY_ACCEPT_CONTENT if accept is None else accept, + ) + self.url = url + + def as_uri(self, include_password=False): + """Return the backend as an URI, sanitizing the password or not""" + # when using maybe_sanitize_url(), "/" is added + # we're stripping it for consistency + if include_password: + return self.url + url = maybe_sanitize_url(self.url or '') + return url[:-1] if url.endswith(':///') else url + + def mark_as_started(self, task_id, **meta): + """Mark a task as started""" + return self.store_result(task_id, meta, status=states.STARTED) + + def mark_as_done(self, task_id, result, request=None): + """Mark task as successfully executed.""" + return self.store_result(task_id, result, + status=states.SUCCESS, request=request) + + def mark_as_failure(self, task_id, exc, traceback=None, request=None): + """Mark task as executed with failure. Stores the exception.""" + return self.store_result(task_id, exc, status=states.FAILURE, + traceback=traceback, request=request) + + def chord_error_from_stack(self, callback, exc=None): + from celery import group + app = self.app + backend = app._tasks[callback.task].backend + try: + group( + [app.signature(errback) + for errback in callback.options.get('link_error') or []], + app=app, + ).apply_async((callback.id, )) + except Exception as eb_exc: + return backend.fail_from_current_stack(callback.id, exc=eb_exc) + else: + return backend.fail_from_current_stack(callback.id, exc=exc) + + def fail_from_current_stack(self, task_id, exc=None): + type_, real_exc, tb = sys.exc_info() + try: + exc = real_exc if exc is None else exc + ei = ExceptionInfo((type_, exc, tb)) + self.mark_as_failure(task_id, exc, ei.traceback) + return ei + finally: + del(tb) + + def mark_as_retry(self, task_id, exc, traceback=None, request=None): + """Mark task as being retries. 
Stores the current + exception (if any).""" + return self.store_result(task_id, exc, status=states.RETRY, + traceback=traceback, request=request) + + def mark_as_revoked(self, task_id, reason='', request=None): + return self.store_result(task_id, TaskRevokedError(reason), + status=states.REVOKED, traceback=None, + request=request) + + def prepare_exception(self, exc, serializer=None): + """Prepare exception for serialization.""" + serializer = self.serializer if serializer is None else serializer + if serializer in EXCEPTION_ABLE_CODECS: + return get_pickleable_exception(exc) + return {'exc_type': type(exc).__name__, 'exc_message': str(exc)} + + def exception_to_python(self, exc): + """Convert serialized exception to Python exception.""" + if exc: + if not isinstance(exc, BaseException): + exc = create_exception_cls( + from_utf8(exc['exc_type']), __name__)(exc['exc_message']) + if self.serializer in EXCEPTION_ABLE_CODECS: + exc = get_pickled_exception(exc) + return exc + + def prepare_value(self, result): + """Prepare value for storage.""" + if self.serializer != 'pickle' and isinstance(result, ResultBase): + return result.as_tuple() + return result + + def encode(self, data): + _, _, payload = dumps(data, serializer=self.serializer) + return payload + + def meta_from_decoded(self, meta): + if meta['status'] in self.EXCEPTION_STATES: + meta['result'] = self.exception_to_python(meta['result']) + return meta + + def decode_result(self, payload): + return self.meta_from_decoded(self.decode(payload)) + + def decode(self, payload): + payload = PY3 and payload or str(payload) + return loads(payload, + content_type=self.content_type, + content_encoding=self.content_encoding, + accept=self.accept) + + def wait_for(self, task_id, + timeout=None, interval=0.5, no_ack=True, on_interval=None): + """Wait for task and return its result. + + If the task raises an exception, this exception + will be re-raised by :func:`wait_for`. + + If `timeout` is not :const:`None`, this raises the + :class:`celery.exceptions.TimeoutError` exception if the operation + takes longer than `timeout` seconds. + + """ + + time_elapsed = 0.0 + + while 1: + meta = self.get_task_meta(task_id) + if meta['status'] in states.READY_STATES: + return meta + if on_interval: + on_interval() + # avoid hammering the CPU checking status. 
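This polling loop is what a blocking result fetch ultimately runs; from the caller's side it looks roughly like this (the task id is a hypothetical placeholder):

from celery.result import AsyncResult
from celery.exceptions import TimeoutError

res = AsyncResult('hypothetical-task-id')
try:
    value = res.get(timeout=10, interval=0.5)   # polls get_task_meta() every 0.5s
except TimeoutError:
    value = None                                # no READY state within 10 seconds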
+ time.sleep(interval) + time_elapsed += interval + if timeout and time_elapsed >= timeout: + raise TimeoutError('The operation timed out.') + + def prepare_expires(self, value, type=None): + if value is None: + value = self.app.conf.CELERY_TASK_RESULT_EXPIRES + if isinstance(value, timedelta): + value = timeutils.timedelta_seconds(value) + if value is not None and type: + return type(value) + return value + + def prepare_persistent(self, enabled=None): + if enabled is not None: + return enabled + p = self.app.conf.CELERY_RESULT_PERSISTENT + return self.persistent if p is None else p + + def encode_result(self, result, status): + if isinstance(result, ExceptionInfo): + result = result.exception + if status in self.EXCEPTION_STATES and isinstance(result, Exception): + return self.prepare_exception(result) + else: + return self.prepare_value(result) + + def is_cached(self, task_id): + return task_id in self._cache + + def store_result(self, task_id, result, status, + traceback=None, request=None, **kwargs): + """Update task state and result.""" + result = self.encode_result(result, status) + self._store_result(task_id, result, status, traceback, + request=request, **kwargs) + return result + + def forget(self, task_id): + self._cache.pop(task_id, None) + self._forget(task_id) + + def _forget(self, task_id): + raise NotImplementedError('backend does not implement forget.') + + def get_status(self, task_id): + """Get the status of a task.""" + return self.get_task_meta(task_id)['status'] + + def get_traceback(self, task_id): + """Get the traceback for a failed task.""" + return self.get_task_meta(task_id).get('traceback') + + def get_result(self, task_id): + """Get the result of a task.""" + return self.get_task_meta(task_id).get('result') + + def get_children(self, task_id): + """Get the list of subtasks sent by a task.""" + try: + return self.get_task_meta(task_id)['children'] + except KeyError: + pass + + def get_task_meta(self, task_id, cache=True): + if cache: + try: + return self._cache[task_id] + except KeyError: + pass + + meta = self._get_task_meta_for(task_id) + if cache and meta.get('status') == states.SUCCESS: + self._cache[task_id] = meta + return meta + + def reload_task_result(self, task_id): + """Reload task result, even if it has been previously fetched.""" + self._cache[task_id] = self.get_task_meta(task_id, cache=False) + + def reload_group_result(self, group_id): + """Reload group result, even if it has been previously fetched.""" + self._cache[group_id] = self.get_group_meta(group_id, cache=False) + + def get_group_meta(self, group_id, cache=True): + if cache: + try: + return self._cache[group_id] + except KeyError: + pass + + meta = self._restore_group(group_id) + if cache and meta is not None: + self._cache[group_id] = meta + return meta + + def restore_group(self, group_id, cache=True): + """Get the result for a group.""" + meta = self.get_group_meta(group_id, cache=cache) + if meta: + return meta['result'] + + def save_group(self, group_id, result): + """Store the result of an executed group.""" + return self._save_group(group_id, result) + + def delete_group(self, group_id): + self._cache.pop(group_id, None) + return self._delete_group(group_id) + + def cleanup(self): + """Backend cleanup. 
Is run by + :class:`celery.task.DeleteExpiredTaskMetaTask`.""" + pass + + def process_cleanup(self): + """Cleanup actions to do at the end of a task worker process.""" + pass + + def on_task_call(self, producer, task_id): + return {} + + def on_chord_part_return(self, task, state, result, propagate=False): + pass + + def fallback_chord_unlock(self, group_id, body, result=None, + countdown=1, **kwargs): + kwargs['result'] = [r.as_tuple() for r in result] + self.app.tasks['celery.chord_unlock'].apply_async( + (group_id, body, ), kwargs, countdown=countdown, + ) + + def apply_chord(self, header, partial_args, group_id, body, **options): + result = header(*partial_args, task_id=group_id) + self.fallback_chord_unlock(group_id, body, **options) + return result + + def current_task_children(self, request=None): + request = request or getattr(current_task(), 'request', None) + if request: + return [r.as_tuple() for r in getattr(request, 'children', [])] + + def __reduce__(self, args=(), kwargs={}): + return (unpickle_backend, (self.__class__, args, kwargs)) +BaseDictBackend = BaseBackend # XXX compat + + +class KeyValueStoreBackend(BaseBackend): + key_t = ensure_bytes + task_keyprefix = 'celery-task-meta-' + group_keyprefix = 'celery-taskset-meta-' + chord_keyprefix = 'chord-unlock-' + implements_incr = False + + def __init__(self, *args, **kwargs): + if hasattr(self.key_t, '__func__'): + self.key_t = self.key_t.__func__ # remove binding + self._encode_prefixes() + super(KeyValueStoreBackend, self).__init__(*args, **kwargs) + if self.implements_incr: + self.apply_chord = self._apply_chord_incr + + def _encode_prefixes(self): + self.task_keyprefix = self.key_t(self.task_keyprefix) + self.group_keyprefix = self.key_t(self.group_keyprefix) + self.chord_keyprefix = self.key_t(self.chord_keyprefix) + + def get(self, key): + raise NotImplementedError('Must implement the get method.') + + def mget(self, keys): + raise NotImplementedError('Does not support get_many') + + def set(self, key, value): + raise NotImplementedError('Must implement the set method.') + + def delete(self, key): + raise NotImplementedError('Must implement the delete method') + + def incr(self, key): + raise NotImplementedError('Does not implement incr') + + def expire(self, key, value): + pass + + def get_key_for_task(self, task_id, key=''): + """Get the cache key for a task by id.""" + key_t = self.key_t + return key_t('').join([ + self.task_keyprefix, key_t(task_id), key_t(key), + ]) + + def get_key_for_group(self, group_id, key=''): + """Get the cache key for a group by id.""" + key_t = self.key_t + return key_t('').join([ + self.group_keyprefix, key_t(group_id), key_t(key), + ]) + + def get_key_for_chord(self, group_id, key=''): + """Get the cache key for the chord waiting on group with given id.""" + key_t = self.key_t + return key_t('').join([ + self.chord_keyprefix, key_t(group_id), key_t(key), + ]) + + def _strip_prefix(self, key): + """Takes bytes, emits string.""" + key = self.key_t(key) + for prefix in self.task_keyprefix, self.group_keyprefix: + if key.startswith(prefix): + return bytes_to_str(key[len(prefix):]) + return bytes_to_str(key) + + def _filter_ready(self, values, READY_STATES=states.READY_STATES): + for k, v in values: + if v is not None: + v = self.decode_result(v) + if v['status'] in READY_STATES: + yield k, v + + def _mget_to_results(self, values, keys): + if hasattr(values, 'items'): + # client returns dict so mapping preserved. 
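These key/value primitives feed get_many() below, which is what lets a group of results be joined natively instead of polling each id in turn. A hedged end-to-end sketch (the broker and backend URLs and the add task are assumptions, and a running worker is required):

from celery import Celery, group

app = Celery('sketch', broker='amqp://', backend='redis://localhost:6379/0')

@app.task
def add(x, y):
    return x + y

job = group(add.s(i, i) for i in range(5))()   # dispatch five tasks
totals = job.get(timeout=10)                   # joins via get_many() on native-join backends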
+ return dict((self._strip_prefix(k), v) + for k, v in self._filter_ready(items(values))) + else: + # client returns list so need to recreate mapping. + return dict((bytes_to_str(keys[i]), v) + for i, v in self._filter_ready(enumerate(values))) + + def get_many(self, task_ids, timeout=None, interval=0.5, no_ack=True, + READY_STATES=states.READY_STATES): + interval = 0.5 if interval is None else interval + ids = task_ids if isinstance(task_ids, set) else set(task_ids) + cached_ids = set() + cache = self._cache + for task_id in ids: + try: + cached = cache[task_id] + except KeyError: + pass + else: + if cached['status'] in READY_STATES: + yield bytes_to_str(task_id), cached + cached_ids.add(task_id) + + ids.difference_update(cached_ids) + iterations = 0 + while ids: + keys = list(ids) + r = self._mget_to_results(self.mget([self.get_key_for_task(k) + for k in keys]), keys) + cache.update(r) + ids.difference_update(set(bytes_to_str(v) for v in r)) + for key, value in items(r): + yield bytes_to_str(key), value + if timeout and iterations * interval >= timeout: + raise TimeoutError('Operation timed out ({0})'.format(timeout)) + time.sleep(interval) # don't busy loop. + iterations += 1 + + def _forget(self, task_id): + self.delete(self.get_key_for_task(task_id)) + + def _store_result(self, task_id, result, status, + traceback=None, request=None, **kwargs): + meta = {'status': status, 'result': result, 'traceback': traceback, + 'children': self.current_task_children(request)} + self.set(self.get_key_for_task(task_id), self.encode(meta)) + return result + + def _save_group(self, group_id, result): + self.set(self.get_key_for_group(group_id), + self.encode({'result': result.as_tuple()})) + return result + + def _delete_group(self, group_id): + self.delete(self.get_key_for_group(group_id)) + + def _get_task_meta_for(self, task_id): + """Get task metadata for a task by id.""" + meta = self.get(self.get_key_for_task(task_id)) + if not meta: + return {'status': states.PENDING, 'result': None} + return self.decode_result(meta) + + def _restore_group(self, group_id): + """Get task metadata for a task by id.""" + meta = self.get(self.get_key_for_group(group_id)) + # previously this was always pickled, but later this + # was extended to support other serializers, so the + # structure is kind of weird. 
+ if meta: + meta = self.decode(meta) + result = meta['result'] + meta['result'] = result_from_tuple(result, self.app) + return meta + + def _apply_chord_incr(self, header, partial_args, group_id, body, + result=None, **options): + self.save_group(group_id, self.app.GroupResult(group_id, result)) + return header(*partial_args, task_id=group_id) + + def on_chord_part_return(self, task, state, result, propagate=None): + if not self.implements_incr: + return + app = self.app + if propagate is None: + propagate = app.conf.CELERY_CHORD_PROPAGATES + gid = task.request.group + if not gid: + return + key = self.get_key_for_chord(gid) + try: + deps = GroupResult.restore(gid, backend=task.backend) + except Exception as exc: + callback = maybe_signature(task.request.chord, app=app) + logger.error('Chord %r raised: %r', gid, exc, exc_info=1) + return self.chord_error_from_stack( + callback, + ChordError('Cannot restore group: {0!r}'.format(exc)), + ) + if deps is None: + try: + raise ValueError(gid) + except ValueError as exc: + callback = maybe_signature(task.request.chord, app=app) + logger.error('Chord callback %r raised: %r', gid, exc, + exc_info=1) + return self.chord_error_from_stack( + callback, + ChordError('GroupResult {0} no longer exists'.format(gid)), + ) + val = self.incr(key) + size = len(deps) + if val > size: + logger.warning('Chord counter incremented too many times for %r', + gid) + elif val == size: + callback = maybe_signature(task.request.chord, app=app) + j = deps.join_native if deps.supports_native_join else deps.join + try: + with allow_join_result(): + ret = j(timeout=3.0, propagate=propagate) + except Exception as exc: + try: + culprit = next(deps._failed_join_report()) + reason = 'Dependency {0.id} raised {1!r}'.format( + culprit, exc, + ) + except StopIteration: + reason = repr(exc) + + logger.error('Chord %r raised: %r', gid, reason, exc_info=1) + self.chord_error_from_stack(callback, ChordError(reason)) + else: + try: + callback.delay(ret) + except Exception as exc: + logger.error('Chord %r raised: %r', gid, exc, exc_info=1) + self.chord_error_from_stack( + callback, + ChordError('Callback error: {0!r}'.format(exc)), + ) + finally: + deps.delete() + self.client.delete(key) + else: + self.expire(key, 86400) + + +class DisabledBackend(BaseBackend): + _cache = {} # need this attribute to reset cache in tests. + + def store_result(self, *args, **kwargs): + pass + + def _is_disabled(self, *args, **kwargs): + raise NotImplementedError( + 'No result backend configured. ' + 'Please see the documentation for more information.') + + def as_uri(self, *args, **kwargs): + return 'disabled://' + + get_state = get_status = get_result = get_traceback = _is_disabled + wait_for = get_many = _is_disabled diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/cache.py b/thesisenv/lib/python3.6/site-packages/celery/backends/cache.py new file mode 100644 index 0000000..3c8230c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/backends/cache.py @@ -0,0 +1,161 @@ +# -*- coding: utf-8 -*- +""" + celery.backends.cache + ~~~~~~~~~~~~~~~~~~~~~ + + Memcache and in-memory cache result backend. 
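A hedged configuration sketch for this backend (the server address is an assumption): the part before '+' selects the cache backend, the rest names the memcached servers.

# settings.py
CELERY_RESULT_BACKEND = 'cache+memcached://127.0.0.1:11211/'

# or an in-process LRU cache, handy for tests:
# CELERY_RESULT_BACKEND = 'cache+memory://'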
+ +""" +from __future__ import absolute_import + +import sys + +from kombu.utils import cached_property +from kombu.utils.encoding import bytes_to_str, ensure_bytes + +from celery.exceptions import ImproperlyConfigured +from celery.utils.functional import LRUCache + +from .base import KeyValueStoreBackend + +__all__ = ['CacheBackend'] + +_imp = [None] + +PY3 = sys.version_info[0] == 3 + +REQUIRES_BACKEND = """\ +The memcached backend requires either pylibmc or python-memcached.\ +""" + +UNKNOWN_BACKEND = """\ +The cache backend {0!r} is unknown, +Please use one of the following backends instead: {1}\ +""" + + +def import_best_memcache(): + if _imp[0] is None: + is_pylibmc, memcache_key_t = False, ensure_bytes + try: + import pylibmc as memcache + is_pylibmc = True + except ImportError: + try: + import memcache # noqa + except ImportError: + raise ImproperlyConfigured(REQUIRES_BACKEND) + if PY3: + memcache_key_t = bytes_to_str + _imp[0] = (is_pylibmc, memcache, memcache_key_t) + return _imp[0] + + +def get_best_memcache(*args, **kwargs): + is_pylibmc, memcache, key_t = import_best_memcache() + Client = _Client = memcache.Client + + if not is_pylibmc: + def Client(*args, **kwargs): # noqa + kwargs.pop('behaviors', None) + return _Client(*args, **kwargs) + + return Client, key_t + + +class DummyClient(object): + + def __init__(self, *args, **kwargs): + self.cache = LRUCache(limit=5000) + + def get(self, key, *args, **kwargs): + return self.cache.get(key) + + def get_multi(self, keys): + cache = self.cache + return dict((k, cache[k]) for k in keys if k in cache) + + def set(self, key, value, *args, **kwargs): + self.cache[key] = value + + def delete(self, key, *args, **kwargs): + self.cache.pop(key, None) + + def incr(self, key, delta=1): + return self.cache.incr(key, delta) + + +backends = {'memcache': get_best_memcache, + 'memcached': get_best_memcache, + 'pylibmc': get_best_memcache, + 'memory': lambda: (DummyClient, ensure_bytes)} + + +class CacheBackend(KeyValueStoreBackend): + servers = None + supports_autoexpire = True + supports_native_join = True + implements_incr = True + + def __init__(self, app, expires=None, backend=None, + options={}, url=None, **kwargs): + super(CacheBackend, self).__init__(app, **kwargs) + self.url = url + + self.options = dict(self.app.conf.CELERY_CACHE_BACKEND_OPTIONS, + **options) + + self.backend = url or backend or self.app.conf.CELERY_CACHE_BACKEND + if self.backend: + self.backend, _, servers = self.backend.partition('://') + self.servers = servers.rstrip('/').split(';') + self.expires = self.prepare_expires(expires, type=int) + try: + self.Client, self.key_t = backends[self.backend]() + except KeyError: + raise ImproperlyConfigured(UNKNOWN_BACKEND.format( + self.backend, ', '.join(backends))) + self._encode_prefixes() # rencode the keyprefixes + + def get(self, key): + return self.client.get(key) + + def mget(self, keys): + return self.client.get_multi(keys) + + def set(self, key, value): + return self.client.set(key, value, self.expires) + + def delete(self, key): + return self.client.delete(key) + + def _apply_chord_incr(self, header, partial_args, group_id, body, **opts): + self.client.set(self.get_key_for_chord(group_id), 0, time=86400) + return super(CacheBackend, self)._apply_chord_incr( + header, partial_args, group_id, body, **opts + ) + + def incr(self, key): + return self.client.incr(key) + + @cached_property + def client(self): + return self.Client(self.servers, **self.options) + + def __reduce__(self, args=(), kwargs={}): + servers = 
';'.join(self.servers) + backend = '{0}://{1}/'.format(self.backend, servers) + kwargs.update( + dict(backend=backend, + expires=self.expires, + options=self.options)) + return super(CacheBackend, self).__reduce__(args, kwargs) + + def as_uri(self, *args, **kwargs): + """Return the backend as an URI. + + This properly handles the case of multiple servers. + + """ + servers = ';'.join(self.servers) + return '{0}://{1}/'.format(self.backend, servers) diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/cassandra.py b/thesisenv/lib/python3.6/site-packages/celery/backends/cassandra.py new file mode 100644 index 0000000..79f17ee --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/backends/cassandra.py @@ -0,0 +1,196 @@ +# -* coding: utf-8 -*- +""" + celery.backends.cassandra + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + Apache Cassandra result store backend. + +""" +from __future__ import absolute_import + +try: # pragma: no cover + import pycassa + from thrift import Thrift + C = pycassa.cassandra.ttypes +except ImportError: # pragma: no cover + pycassa = None # noqa + +import socket +import time + +from celery import states +from celery.exceptions import ImproperlyConfigured +from celery.five import monotonic +from celery.utils.log import get_logger +from celery.utils.timeutils import maybe_timedelta, timedelta_seconds + +from .base import BaseBackend + +__all__ = ['CassandraBackend'] + +logger = get_logger(__name__) + + +class CassandraBackend(BaseBackend): + """Highly fault tolerant Cassandra backend. + + .. attribute:: servers + + List of Cassandra servers with format: ``hostname:port``. + + :raises celery.exceptions.ImproperlyConfigured: if + module :mod:`pycassa` is not available. + + """ + servers = [] + keyspace = None + column_family = None + detailed_mode = False + _retry_timeout = 300 + _retry_wait = 3 + supports_autoexpire = True + + def __init__(self, servers=None, keyspace=None, column_family=None, + cassandra_options=None, detailed_mode=False, **kwargs): + """Initialize Cassandra backend. + + Raises :class:`celery.exceptions.ImproperlyConfigured` if + the :setting:`CASSANDRA_SERVERS` setting is not set. + + """ + super(CassandraBackend, self).__init__(**kwargs) + + self.expires = kwargs.get('expires') or maybe_timedelta( + self.app.conf.CELERY_TASK_RESULT_EXPIRES) + + if not pycassa: + raise ImproperlyConfigured( + 'You need to install the pycassa library to use the ' + 'Cassandra backend. 
See https://github.com/pycassa/pycassa') + + conf = self.app.conf + self.servers = (servers or + conf.get('CASSANDRA_SERVERS') or + self.servers) + self.keyspace = (keyspace or + conf.get('CASSANDRA_KEYSPACE') or + self.keyspace) + self.column_family = (column_family or + conf.get('CASSANDRA_COLUMN_FAMILY') or + self.column_family) + self.cassandra_options = dict(conf.get('CASSANDRA_OPTIONS') or {}, + **cassandra_options or {}) + self.detailed_mode = (detailed_mode or + conf.get('CASSANDRA_DETAILED_MODE') or + self.detailed_mode) + read_cons = conf.get('CASSANDRA_READ_CONSISTENCY') or 'LOCAL_QUORUM' + write_cons = conf.get('CASSANDRA_WRITE_CONSISTENCY') or 'LOCAL_QUORUM' + try: + self.read_consistency = getattr(pycassa.ConsistencyLevel, + read_cons) + except AttributeError: + self.read_consistency = pycassa.ConsistencyLevel.LOCAL_QUORUM + try: + self.write_consistency = getattr(pycassa.ConsistencyLevel, + write_cons) + except AttributeError: + self.write_consistency = pycassa.ConsistencyLevel.LOCAL_QUORUM + + if not self.servers or not self.keyspace or not self.column_family: + raise ImproperlyConfigured( + 'Cassandra backend not configured.') + + self._column_family = None + + def _retry_on_error(self, fun, *args, **kwargs): + ts = monotonic() + self._retry_timeout + while 1: + try: + return fun(*args, **kwargs) + except (pycassa.InvalidRequestException, + pycassa.TimedOutException, + pycassa.UnavailableException, + pycassa.AllServersUnavailable, + socket.error, + socket.timeout, + Thrift.TException) as exc: + if monotonic() > ts: + raise + logger.warning('Cassandra error: %r. Retrying...', exc) + time.sleep(self._retry_wait) + + def _get_column_family(self): + if self._column_family is None: + conn = pycassa.ConnectionPool(self.keyspace, + server_list=self.servers, + **self.cassandra_options) + self._column_family = pycassa.ColumnFamily( + conn, self.column_family, + read_consistency_level=self.read_consistency, + write_consistency_level=self.write_consistency, + ) + return self._column_family + + def process_cleanup(self): + if self._column_family is not None: + self._column_family = None + + def _store_result(self, task_id, result, status, + traceback=None, request=None, **kwargs): + """Store return value and status of an executed task.""" + + def _do_store(): + cf = self._get_column_family() + date_done = self.app.now() + meta = {'status': status, + 'date_done': date_done.strftime('%Y-%m-%dT%H:%M:%SZ'), + 'traceback': self.encode(traceback), + 'result': self.encode(result), + 'children': self.encode( + self.current_task_children(request), + )} + if self.detailed_mode: + cf.insert(task_id, {date_done: self.encode(meta)}, + ttl=self.expires and timedelta_seconds(self.expires)) + else: + cf.insert(task_id, meta, + ttl=self.expires and timedelta_seconds(self.expires)) + + return self._retry_on_error(_do_store) + + def as_uri(self, include_password=True): + return 'cassandra://' + + def _get_task_meta_for(self, task_id): + """Get task metadata for a task by id.""" + + def _do_get(): + cf = self._get_column_family() + try: + if self.detailed_mode: + row = cf.get(task_id, column_reversed=True, column_count=1) + obj = self.decode(list(row.values())[0]) + else: + obj = cf.get(task_id) + + meta = { + 'task_id': task_id, + 'status': obj['status'], + 'result': self.decode(obj['result']), + 'date_done': obj['date_done'], + 'traceback': self.decode(obj['traceback']), + 'children': self.decode(obj['children']), + } + except (KeyError, pycassa.NotFoundException): + meta = {'status': states.PENDING, 
'result': None} + return meta + + return self._retry_on_error(_do_get) + + def __reduce__(self, args=(), kwargs={}): + kwargs.update( + dict(servers=self.servers, + keyspace=self.keyspace, + column_family=self.column_family, + cassandra_options=self.cassandra_options)) + return super(CassandraBackend, self).__reduce__(args, kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/couchbase.py b/thesisenv/lib/python3.6/site-packages/celery/backends/couchbase.py new file mode 100644 index 0000000..cd7555e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/backends/couchbase.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- +""" + celery.backends.couchbase + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + CouchBase result store backend. + +""" +from __future__ import absolute_import + +import logging + +try: + from couchbase import Couchbase + from couchbase.connection import Connection + from couchbase.exceptions import NotFoundError +except ImportError: + Couchbase = Connection = NotFoundError = None # noqa + +from kombu.utils.url import _parse_url + +from celery.exceptions import ImproperlyConfigured +from celery.utils.timeutils import maybe_timedelta + +from .base import KeyValueStoreBackend + +__all__ = ['CouchBaseBackend'] + + +class CouchBaseBackend(KeyValueStoreBackend): + """CouchBase backend. + + :raises celery.exceptions.ImproperlyConfigured: if + module :mod:`couchbase` is not available. + + """ + bucket = 'default' + host = 'localhost' + port = 8091 + username = None + password = None + quiet = False + conncache = None + unlock_gil = True + timeout = 2.5 + transcoder = None + + def __init__(self, url=None, *args, **kwargs): + super(CouchBaseBackend, self).__init__(*args, **kwargs) + self.url = url + + self.expires = kwargs.get('expires') or maybe_timedelta( + self.app.conf.CELERY_TASK_RESULT_EXPIRES) + + if Couchbase is None: + raise ImproperlyConfigured( + 'You need to install the couchbase library to use the ' + 'CouchBase backend.', + ) + + uhost = uport = uname = upass = ubucket = None + if url: + _, uhost, uport, uname, upass, ubucket, _ = _parse_url(url) + ubucket = ubucket.strip('/') if ubucket else None + + config = self.app.conf.get('CELERY_COUCHBASE_BACKEND_SETTINGS', None) + if config is not None: + if not isinstance(config, dict): + raise ImproperlyConfigured( + 'Couchbase backend settings should be grouped in a dict', + ) + else: + config = {} + + self.host = uhost or config.get('host', self.host) + self.port = int(uport or config.get('port', self.port)) + self.bucket = ubucket or config.get('bucket', self.bucket) + self.username = uname or config.get('username', self.username) + self.password = upass or config.get('password', self.password) + + self._connection = None + + def _get_connection(self): + """Connect to the Couchbase server.""" + if self._connection is None: + kwargs = {'bucket': self.bucket, 'host': self.host} + + if self.port: + kwargs.update({'port': self.port}) + if self.username: + kwargs.update({'username': self.username}) + if self.password: + kwargs.update({'password': self.password}) + + logging.debug('couchbase settings %r', kwargs) + self._connection = Connection(**kwargs) + return self._connection + + @property + def connection(self): + return self._get_connection() + + def get(self, key): + try: + return self.connection.get(key).value + except NotFoundError: + return None + + def set(self, key, value): + self.connection.set(key, value) + + def mget(self, keys): + return [self.get(key) for key in keys] + + def delete(self, key): + 
self.connection.delete(key) diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/database/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/backends/database/__init__.py new file mode 100644 index 0000000..f47fdd5 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/backends/database/__init__.py @@ -0,0 +1,201 @@ +# -*- coding: utf-8 -*- +""" + celery.backends.database + ~~~~~~~~~~~~~~~~~~~~~~~~ + + SQLAlchemy result store backend. + +""" +from __future__ import absolute_import + +import logging +from contextlib import contextmanager +from functools import wraps + +from celery import states +from celery.backends.base import BaseBackend +from celery.exceptions import ImproperlyConfigured +from celery.five import range +from celery.utils.timeutils import maybe_timedelta + +from .models import Task +from .models import TaskSet +from .session import SessionManager + +logger = logging.getLogger(__name__) + +__all__ = ['DatabaseBackend'] + + +def _sqlalchemy_installed(): + try: + import sqlalchemy + except ImportError: + raise ImproperlyConfigured( + 'The database result backend requires SQLAlchemy to be installed.' + 'See http://pypi.python.org/pypi/SQLAlchemy') + return sqlalchemy +_sqlalchemy_installed() + +from sqlalchemy.exc import DatabaseError, InvalidRequestError # noqa +from sqlalchemy.orm.exc import StaleDataError # noqa + + +@contextmanager +def session_cleanup(session): + try: + yield + except Exception: + session.rollback() + raise + finally: + session.close() + + +def retry(fun): + + @wraps(fun) + def _inner(*args, **kwargs): + max_retries = kwargs.pop('max_retries', 3) + + for retries in range(max_retries): + try: + return fun(*args, **kwargs) + except (DatabaseError, InvalidRequestError, StaleDataError): + logger.warning( + "Failed operation %s. Retrying %s more times.", + fun.__name__, max_retries - retries - 1, + exc_info=True, + ) + if retries + 1 >= max_retries: + raise + + return _inner + + +class DatabaseBackend(BaseBackend): + """The database result backend.""" + # ResultSet.iterate should sleep this much between each pool, + # to not bombard the database with queries. + subpolling_interval = 0.5 + + def __init__(self, dburi=None, expires=None, + engine_options=None, url=None, **kwargs): + # The `url` argument was added later and is used by + # the app to set backend by url (celery.backends.get_backend_by_url) + super(DatabaseBackend, self).__init__(**kwargs) + conf = self.app.conf + self.expires = maybe_timedelta(self.prepare_expires(expires)) + self.url = url or dburi or conf.CELERY_RESULT_DBURI + self.engine_options = dict( + engine_options or {}, + **conf.CELERY_RESULT_ENGINE_OPTIONS or {}) + self.short_lived_sessions = kwargs.get( + 'short_lived_sessions', + conf.CELERY_RESULT_DB_SHORT_LIVED_SESSIONS, + ) + + tablenames = conf.CELERY_RESULT_DB_TABLENAMES or {} + Task.__table__.name = tablenames.get('task', 'celery_taskmeta') + TaskSet.__table__.name = tablenames.get('group', 'celery_tasksetmeta') + + if not self.url: + raise ImproperlyConfigured( + 'Missing connection string! 
Do you have ' + 'CELERY_RESULT_DBURI set to a real value?') + + def ResultSession(self, session_manager=SessionManager()): + return session_manager.session_factory( + dburi=self.url, + short_lived_sessions=self.short_lived_sessions, + **self.engine_options + ) + + @retry + def _store_result(self, task_id, result, status, + traceback=None, max_retries=3, **kwargs): + """Store return value and status of an executed task.""" + session = self.ResultSession() + with session_cleanup(session): + task = list(session.query(Task).filter(Task.task_id == task_id)) + task = task and task[0] + if not task: + task = Task(task_id) + session.add(task) + session.flush() + task.result = result + task.status = status + task.traceback = traceback + session.commit() + return result + + @retry + def _get_task_meta_for(self, task_id): + """Get task metadata for a task by id.""" + session = self.ResultSession() + with session_cleanup(session): + task = list(session.query(Task).filter(Task.task_id == task_id)) + task = task and task[0] + if not task: + task = Task(task_id) + task.status = states.PENDING + task.result = None + return self.meta_from_decoded(task.to_dict()) + + @retry + def _save_group(self, group_id, result): + """Store the result of an executed group.""" + session = self.ResultSession() + with session_cleanup(session): + group = TaskSet(group_id, result) + session.add(group) + session.flush() + session.commit() + return result + + @retry + def _restore_group(self, group_id): + """Get metadata for group by id.""" + session = self.ResultSession() + with session_cleanup(session): + group = session.query(TaskSet).filter( + TaskSet.taskset_id == group_id).first() + if group: + return group.to_dict() + + @retry + def _delete_group(self, group_id): + """Delete metadata for group by id.""" + session = self.ResultSession() + with session_cleanup(session): + session.query(TaskSet).filter( + TaskSet.taskset_id == group_id).delete() + session.flush() + session.commit() + + @retry + def _forget(self, task_id): + """Forget about result.""" + session = self.ResultSession() + with session_cleanup(session): + session.query(Task).filter(Task.task_id == task_id).delete() + session.commit() + + def cleanup(self): + """Delete expired metadata.""" + session = self.ResultSession() + expires = self.expires + now = self.app.now() + with session_cleanup(session): + session.query(Task).filter( + Task.date_done < (now - expires)).delete() + session.query(TaskSet).filter( + TaskSet.date_done < (now - expires)).delete() + session.commit() + + def __reduce__(self, args=(), kwargs={}): + kwargs.update( + dict(dburi=self.url, + expires=self.expires, + engine_options=self.engine_options)) + return super(DatabaseBackend, self).__reduce__(args, kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/database/models.py b/thesisenv/lib/python3.6/site-packages/celery/backends/database/models.py new file mode 100644 index 0000000..2802a00 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/backends/database/models.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +""" + celery.backends.database.models + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Database tables for the SQLAlchemy result store backend. 
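For reference, a hedged sketch of pointing the result store at these tables through an SQLAlchemy URL (the SQLite path and table names are assumptions):

# settings.py
CELERY_RESULT_BACKEND = 'db+sqlite:///results.sqlite'
# optional: rename the two tables defined below
CELERY_RESULT_DB_TABLENAMES = {'task': 'myapp_taskmeta', 'group': 'myapp_groupmeta'}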
+ +""" +from __future__ import absolute_import + +from datetime import datetime + +import sqlalchemy as sa +from sqlalchemy.types import PickleType + +from celery import states + +from .session import ResultModelBase + +__all__ = ['Task', 'TaskSet'] + + +class Task(ResultModelBase): + """Task result/status.""" + __tablename__ = 'celery_taskmeta' + __table_args__ = {'sqlite_autoincrement': True} + + id = sa.Column(sa.Integer, sa.Sequence('task_id_sequence'), + primary_key=True, + autoincrement=True) + task_id = sa.Column(sa.String(255), unique=True) + status = sa.Column(sa.String(50), default=states.PENDING) + result = sa.Column(PickleType, nullable=True) + date_done = sa.Column(sa.DateTime, default=datetime.utcnow, + onupdate=datetime.utcnow, nullable=True) + traceback = sa.Column(sa.Text, nullable=True) + + def __init__(self, task_id): + self.task_id = task_id + + def to_dict(self): + return {'task_id': self.task_id, + 'status': self.status, + 'result': self.result, + 'traceback': self.traceback, + 'date_done': self.date_done} + + def __repr__(self): + return ''.format(self) + + +class TaskSet(ResultModelBase): + """TaskSet result""" + __tablename__ = 'celery_tasksetmeta' + __table_args__ = {'sqlite_autoincrement': True} + + id = sa.Column(sa.Integer, sa.Sequence('taskset_id_sequence'), + autoincrement=True, primary_key=True) + taskset_id = sa.Column(sa.String(255), unique=True) + result = sa.Column(PickleType, nullable=True) + date_done = sa.Column(sa.DateTime, default=datetime.utcnow, + nullable=True) + + def __init__(self, taskset_id, result): + self.taskset_id = taskset_id + self.result = result + + def to_dict(self): + return {'taskset_id': self.taskset_id, + 'result': self.result, + 'date_done': self.date_done} + + def __repr__(self): + return ''.format(self) diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/database/session.py b/thesisenv/lib/python3.6/site-packages/celery/backends/database/session.py new file mode 100644 index 0000000..1575d7f --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/backends/database/session.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +""" + celery.backends.database.session + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + SQLAlchemy sessions. 
+ +""" +from __future__ import absolute_import + +from billiard.util import register_after_fork + +from sqlalchemy import create_engine +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import sessionmaker +from sqlalchemy.pool import NullPool + +ResultModelBase = declarative_base() + +__all__ = ['SessionManager'] + + +class SessionManager(object): + def __init__(self): + self._engines = {} + self._sessions = {} + self.forked = False + self.prepared = False + register_after_fork(self, self._after_fork) + + def _after_fork(self,): + self.forked = True + + def get_engine(self, dburi, **kwargs): + if self.forked: + try: + return self._engines[dburi] + except KeyError: + engine = self._engines[dburi] = create_engine(dburi, **kwargs) + return engine + else: + kwargs['poolclass'] = NullPool + return create_engine(dburi, **kwargs) + + def create_session(self, dburi, short_lived_sessions=False, **kwargs): + engine = self.get_engine(dburi, **kwargs) + if self.forked: + if short_lived_sessions or dburi not in self._sessions: + self._sessions[dburi] = sessionmaker(bind=engine) + return engine, self._sessions[dburi] + else: + return engine, sessionmaker(bind=engine) + + def prepare_models(self, engine): + if not self.prepared: + ResultModelBase.metadata.create_all(engine) + self.prepared = True + + def session_factory(self, dburi, **kwargs): + engine, session = self.create_session(dburi, **kwargs) + self.prepare_models(engine) + return session() diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/mongodb.py b/thesisenv/lib/python3.6/site-packages/celery/backends/mongodb.py new file mode 100644 index 0000000..281c38c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/backends/mongodb.py @@ -0,0 +1,264 @@ +# -*- coding: utf-8 -*- +""" + celery.backends.mongodb + ~~~~~~~~~~~~~~~~~~~~~~~ + + MongoDB result store backend. + +""" +from __future__ import absolute_import + +from datetime import datetime + +from kombu.syn import detect_environment +from kombu.utils import cached_property +from kombu.utils.url import maybe_sanitize_url + +from celery import states +from celery.exceptions import ImproperlyConfigured +from celery.five import items, string_t +from celery.utils.timeutils import maybe_timedelta + +from .base import BaseBackend + +try: + import pymongo +except ImportError: # pragma: no cover + pymongo = None # noqa + +if pymongo: + try: + from bson.binary import Binary + except ImportError: # pragma: no cover + from pymongo.binary import Binary # noqa +else: # pragma: no cover + Binary = None # noqa + +__all__ = ['MongoBackend'] + + +class MongoBackend(BaseBackend): + """MongoDB result backend. + + :raises celery.exceptions.ImproperlyConfigured: if + module :mod:`pymongo` is not available. 
+ + """ + + host = 'localhost' + port = 27017 + user = None + password = None + database_name = 'celery' + taskmeta_collection = 'celery_taskmeta' + max_pool_size = 10 + options = None + + supports_autoexpire = False + + _connection = None + + def __init__(self, app=None, url=None, **kwargs): + self.options = {} + super(MongoBackend, self).__init__(app, **kwargs) + self.expires = kwargs.get('expires') or maybe_timedelta( + self.app.conf.CELERY_TASK_RESULT_EXPIRES) + + if not pymongo: + raise ImproperlyConfigured( + 'You need to install the pymongo library to use the ' + 'MongoDB backend.') + + config = self.app.conf.get('CELERY_MONGODB_BACKEND_SETTINGS') + if config is not None: + if not isinstance(config, dict): + raise ImproperlyConfigured( + 'MongoDB backend settings should be grouped in a dict') + config = dict(config) # do not modify original + + self.host = config.pop('host', self.host) + self.port = int(config.pop('port', self.port)) + self.user = config.pop('user', self.user) + self.password = config.pop('password', self.password) + self.database_name = config.pop('database', self.database_name) + self.taskmeta_collection = config.pop( + 'taskmeta_collection', self.taskmeta_collection, + ) + + self.options = dict(config, **config.pop('options', None) or {}) + + # Set option defaults + for key, value in items(self._prepare_client_options()): + self.options.setdefault(key, value) + + self.url = url + if self.url: + # Specifying backend as an URL + self.host = self.url + + def _prepare_client_options(self): + if pymongo.version_tuple >= (3, ): + return {'maxPoolSize': self.max_pool_size} + else: # pragma: no cover + options = { + 'max_pool_size': self.max_pool_size, + 'auto_start_request': False + } + if detect_environment() != 'default': + options['use_greenlets'] = True + return options + + def _get_connection(self): + """Connect to the MongoDB server.""" + if self._connection is None: + from pymongo import MongoClient + + # The first pymongo.Connection() argument (host) can be + # a list of ['host:port'] elements or a mongodb connection + # URI. If this is the case, don't use self.port + # but let pymongo get the port(s) from the URI instead. + # This enables the use of replica sets and sharding. + # See pymongo.Connection() for more info. 
+ url = self.host + if isinstance(url, string_t) \ + and not url.startswith('mongodb://'): + url = 'mongodb://{0}:{1}'.format(url, self.port) + if url == 'mongodb://': + url = url + 'localhost' + self._connection = MongoClient(host=url, **self.options) + + return self._connection + + def process_cleanup(self): + if self._connection is not None: + # MongoDB connection will be closed automatically when object + # goes out of scope + del(self.collection) + del(self.database) + self._connection = None + + def _store_result(self, task_id, result, status, + traceback=None, request=None, **kwargs): + """Store return value and status of an executed task.""" + meta = {'_id': task_id, + 'status': status, + 'result': Binary(self.encode(result)), + 'date_done': datetime.utcnow(), + 'traceback': Binary(self.encode(traceback)), + 'children': Binary(self.encode( + self.current_task_children(request), + ))} + self.collection.save(meta) + + return result + + def _get_task_meta_for(self, task_id): + """Get task metadata for a task by id.""" + + obj = self.collection.find_one({'_id': task_id}) + if not obj: + return {'status': states.PENDING, 'result': None} + + meta = { + 'task_id': obj['_id'], + 'status': obj['status'], + 'result': self.decode(obj['result']), + 'date_done': obj['date_done'], + 'traceback': self.decode(obj['traceback']), + 'children': self.decode(obj['children']), + } + + return meta + + def _save_group(self, group_id, result): + """Save the group result.""" + meta = {'_id': group_id, + 'result': Binary(self.encode(result)), + 'date_done': datetime.utcnow()} + self.collection.save(meta) + + return result + + def _restore_group(self, group_id): + """Get the result for a group by id.""" + obj = self.collection.find_one({'_id': group_id}) + if not obj: + return + + meta = { + 'task_id': obj['_id'], + 'result': self.decode(obj['result']), + 'date_done': obj['date_done'], + } + + return meta + + def _delete_group(self, group_id): + """Delete a group by id.""" + self.collection.remove({'_id': group_id}) + + def _forget(self, task_id): + """Remove result from MongoDB. + + :raises celery.exceptions.OperationsError: + if the task_id could not be removed. + + """ + # By using safe=True, this will wait until it receives a response from + # the server. Likewise, it will raise an OperationsError if the + # response was unable to be completed. + self.collection.remove({'_id': task_id}) + + def cleanup(self): + """Delete expired metadata.""" + self.collection.remove( + {'date_done': {'$lt': self.app.now() - self.expires}}, + ) + + def __reduce__(self, args=(), kwargs={}): + return super(MongoBackend, self).__reduce__( + args, dict(kwargs, expires=self.expires, url=self.url), + ) + + def _get_database(self): + conn = self._get_connection() + db = conn[self.database_name] + if self.user and self.password: + if not db.authenticate(self.user, + self.password): + raise ImproperlyConfigured( + 'Invalid MongoDB username or password.') + return db + + @cached_property + def database(self): + """Get database from MongoDB connection and perform authentication + if necessary.""" + return self._get_database() + + @cached_property + def collection(self): + """Get the metadata task collection.""" + collection = self.database[self.taskmeta_collection] + + # Ensure an index on date_done is there, if not process the index + # in the background. 
Once completed cleanup will be much faster + collection.ensure_index('date_done', background='true') + return collection + + def as_uri(self, include_password=False): + """Return the backend as an URI. + + :keyword include_password: Censor passwords. + + """ + if not self.url: + return 'mongodb://' + if include_password: + return self.url + + if ',' not in self.url: + return maybe_sanitize_url(self.url) + + uri1, remainder = self.url.split(',', 1) + return ','.join([maybe_sanitize_url(uri1), remainder]) diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/redis.py b/thesisenv/lib/python3.6/site-packages/celery/backends/redis.py new file mode 100644 index 0000000..1e838c1 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/backends/redis.py @@ -0,0 +1,295 @@ +# -*- coding: utf-8 -*- +""" + celery.backends.redis + ~~~~~~~~~~~~~~~~~~~~~ + + Redis result store backend. + +""" +from __future__ import absolute_import + +from functools import partial + +from kombu.utils import cached_property, retry_over_time +from kombu.utils.url import _parse_url + +from celery import states +from celery.canvas import maybe_signature +from celery.exceptions import ChordError, ImproperlyConfigured +from celery.five import string_t +from celery.utils import deprecated_property, strtobool +from celery.utils.functional import dictfilter +from celery.utils.log import get_logger +from celery.utils.timeutils import humanize_seconds + +from .base import KeyValueStoreBackend + +try: + import redis + from redis.exceptions import ConnectionError + from kombu.transport.redis import get_redis_error_classes +except ImportError: # pragma: no cover + redis = None # noqa + ConnectionError = None # noqa + get_redis_error_classes = None # noqa + +__all__ = ['RedisBackend'] + +REDIS_MISSING = """\ +You need to install the redis library in order to use \ +the Redis result store backend.""" + +logger = get_logger(__name__) +error = logger.error + + +class RedisBackend(KeyValueStoreBackend): + """Redis task result store.""" + + #: redis-py client module. + redis = redis + + #: Maximium number of connections in the pool. + max_connections = None + + supports_autoexpire = True + supports_native_join = True + implements_incr = True + + def __init__(self, host=None, port=None, db=None, password=None, + expires=None, max_connections=None, url=None, + connection_pool=None, new_join=False, **kwargs): + super(RedisBackend, self).__init__(**kwargs) + conf = self.app.conf + if self.redis is None: + raise ImproperlyConfigured(REDIS_MISSING) + self._client_capabilities = self._detect_client_capabilities() + + # For compatibility with the old REDIS_* configuration keys. 
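+        # Illustrative note (editorial comment, not part of the upstream
+        # celery source): _get() below looks up CELERY_REDIS_HOST first and
+        # then falls back to the legacy REDIS_HOST key (likewise for PORT,
+        # DB, PASSWORD and MAX_CONNECTIONS).  The equivalent URL form is e.g.
+        #     CELERY_RESULT_BACKEND = 'redis://:secret@localhost:6379/1'
+        # which _params_from_url() splits into host/port/password/db.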
+ def _get(key): + for prefix in 'CELERY_REDIS_{0}', 'REDIS_{0}': + try: + return conf[prefix.format(key)] + except KeyError: + pass + if host and '://' in host: + url = host + host = None + + self.max_connections = ( + max_connections or _get('MAX_CONNECTIONS') or self.max_connections + ) + self._ConnectionPool = connection_pool + + self.connparams = { + 'host': _get('HOST') or 'localhost', + 'port': _get('PORT') or 6379, + 'db': _get('DB') or 0, + 'password': _get('PASSWORD'), + 'max_connections': self.max_connections, + } + if url: + self.connparams = self._params_from_url(url, self.connparams) + self.url = url + self.expires = self.prepare_expires(expires, type=int) + + try: + new_join = strtobool(self.connparams.pop('new_join')) + except KeyError: + pass + if new_join: + self.apply_chord = self._new_chord_apply + self.on_chord_part_return = self._new_chord_return + + self.connection_errors, self.channel_errors = ( + get_redis_error_classes() if get_redis_error_classes + else ((), ())) + + def _params_from_url(self, url, defaults): + scheme, host, port, user, password, path, query = _parse_url(url) + connparams = dict( + defaults, **dictfilter({ + 'host': host, 'port': port, 'password': password, + 'db': query.pop('virtual_host', None)}) + ) + + if scheme == 'socket': + # use 'path' as path to the socket… in this case + # the database number should be given in 'query' + connparams.update({ + 'connection_class': self.redis.UnixDomainSocketConnection, + 'path': '/' + path, + }) + # host+port are invalid options when using this connection type. + connparams.pop('host', None) + connparams.pop('port', None) + else: + connparams['db'] = path + + # db may be string and start with / like in kombu. + db = connparams.get('db') or 0 + db = db.strip('/') if isinstance(db, string_t) else db + connparams['db'] = int(db) + + # Query parameters override other parameters + connparams.update(query) + return connparams + + def get(self, key): + return self.client.get(key) + + def mget(self, keys): + return self.client.mget(keys) + + def ensure(self, fun, args, **policy): + retry_policy = dict(self.retry_policy, **policy) + max_retries = retry_policy.get('max_retries') + return retry_over_time( + fun, self.connection_errors, args, {}, + partial(self.on_connection_error, max_retries), + **retry_policy + ) + + def on_connection_error(self, max_retries, exc, intervals, retries): + tts = next(intervals) + error('Connection to Redis lost: Retry (%s/%s) %s.', + retries, max_retries or 'Inf', + humanize_seconds(tts, 'in ')) + return tts + + def set(self, key, value, **retry_policy): + return self.ensure(self._set, (key, value), **retry_policy) + + def _set(self, key, value): + with self.client.pipeline() as pipe: + if self.expires: + pipe.setex(key, value, self.expires) + else: + pipe.set(key, value) + pipe.publish(key, value) + pipe.execute() + + def delete(self, key): + self.client.delete(key) + + def incr(self, key): + return self.client.incr(key) + + def expire(self, key, value): + return self.client.expire(key, value) + + def _unpack_chord_result(self, tup, decode, + EXCEPTION_STATES=states.EXCEPTION_STATES, + PROPAGATE_STATES=states.PROPAGATE_STATES): + _, tid, state, retval = decode(tup) + if state in EXCEPTION_STATES: + retval = self.exception_to_python(retval) + if state in PROPAGATE_STATES: + raise ChordError('Dependency {0} raised {1!r}'.format(tid, retval)) + return retval + + def _new_chord_apply(self, header, partial_args, group_id, body, + result=None, **options): + # avoids saving the group in 
the redis db. + return header(*partial_args, task_id=group_id) + + def _new_chord_return(self, task, state, result, propagate=None, + PROPAGATE_STATES=states.PROPAGATE_STATES): + app = self.app + if propagate is None: + propagate = self.app.conf.CELERY_CHORD_PROPAGATES + request = task.request + tid, gid = request.id, request.group + if not gid or not tid: + return + + client = self.client + jkey = self.get_key_for_group(gid, '.j') + result = self.encode_result(result, state) + with client.pipeline() as pipe: + _, readycount, _ = pipe \ + .rpush(jkey, self.encode([1, tid, state, result])) \ + .llen(jkey) \ + .expire(jkey, 86400) \ + .execute() + + try: + callback = maybe_signature(request.chord, app=app) + total = callback['chord_size'] + if readycount == total: + decode, unpack = self.decode, self._unpack_chord_result + with client.pipeline() as pipe: + resl, _, = pipe \ + .lrange(jkey, 0, total) \ + .delete(jkey) \ + .execute() + try: + callback.delay([unpack(tup, decode) for tup in resl]) + except Exception as exc: + error('Chord callback for %r raised: %r', + request.group, exc, exc_info=1) + return self.chord_error_from_stack( + callback, + ChordError('Callback error: {0!r}'.format(exc)), + ) + except ChordError as exc: + error('Chord %r raised: %r', request.group, exc, exc_info=1) + return self.chord_error_from_stack(callback, exc) + except Exception as exc: + error('Chord %r raised: %r', request.group, exc, exc_info=1) + return self.chord_error_from_stack( + callback, ChordError('Join error: {0!r}'.format(exc)), + ) + + def _detect_client_capabilities(self, socket_connect_timeout=False): + if self.redis.VERSION < (2, 4, 4): + raise ImproperlyConfigured( + 'Redis backend requires redis-py versions 2.4.4 or later. ' + 'You have {0.__version__}'.format(redis)) + if self.redis.VERSION >= (2, 10): + socket_connect_timeout = True + return {'socket_connect_timeout': socket_connect_timeout} + + def _create_client(self, socket_timeout=None, socket_connect_timeout=None, + **params): + return self._new_redis_client( + socket_timeout=socket_timeout and float(socket_timeout), + socket_connect_timeout=socket_connect_timeout and float( + socket_connect_timeout), **params + ) + + def _new_redis_client(self, **params): + if not self._client_capabilities['socket_connect_timeout']: + params.pop('socket_connect_timeout', None) + return self.redis.Redis(connection_pool=self.ConnectionPool(**params)) + + @property + def ConnectionPool(self): + if self._ConnectionPool is None: + self._ConnectionPool = self.redis.ConnectionPool + return self._ConnectionPool + + @cached_property + def client(self): + return self._create_client(**self.connparams) + + def __reduce__(self, args=(), kwargs={}): + return super(RedisBackend, self).__reduce__( + (self.url, ), {'expires': self.expires}, + ) + + @deprecated_property(3.2, 3.3) + def host(self): + return self.connparams['host'] + + @deprecated_property(3.2, 3.3) + def port(self): + return self.connparams['port'] + + @deprecated_property(3.2, 3.3) + def db(self): + return self.connparams['db'] + + @deprecated_property(3.2, 3.3) + def password(self): + return self.connparams['password'] diff --git a/thesisenv/lib/python3.6/site-packages/celery/backends/rpc.py b/thesisenv/lib/python3.6/site-packages/celery/backends/rpc.py new file mode 100644 index 0000000..92bcc61 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/backends/rpc.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- +""" + celery.backends.rpc + ~~~~~~~~~~~~~~~~~~~ + + RPC-style result backend, using 
reply-to and one queue per client. + +""" +from __future__ import absolute_import + +from kombu import Consumer, Exchange +from kombu.common import maybe_declare +from kombu.utils import cached_property + +from celery import current_task +from celery.backends import amqp + +__all__ = ['RPCBackend'] + + +class RPCBackend(amqp.AMQPBackend): + persistent = False + + class Consumer(Consumer): + auto_declare = False + + def _create_exchange(self, name, type='direct', delivery_mode=2): + # uses direct to queue routing (anon exchange). + return Exchange(None) + + def on_task_call(self, producer, task_id): + maybe_declare(self.binding(producer.channel), retry=True) + + def _create_binding(self, task_id): + return self.binding + + def _many_bindings(self, ids): + return [self.binding] + + def rkey(self, task_id): + return task_id + + def destination_for(self, task_id, request): + # Request is a new argument for backends, so must still support + # old code that rely on current_task + try: + request = request or current_task.request + except AttributeError: + raise RuntimeError( + 'RPC backend missing task request for {0!r}'.format(task_id), + ) + return request.reply_to, request.correlation_id or task_id + + def on_reply_declare(self, task_id): + pass + + def as_uri(self, include_password=True): + return 'rpc://' + + @property + def binding(self): + return self.Queue(self.oid, self.exchange, self.oid, + durable=False, auto_delete=False) + + @cached_property + def oid(self): + return self.app.oid diff --git a/thesisenv/lib/python3.6/site-packages/celery/beat.py b/thesisenv/lib/python3.6/site-packages/celery/beat.py new file mode 100644 index 0000000..368a903 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/beat.py @@ -0,0 +1,571 @@ +# -*- coding: utf-8 -*- +""" + celery.beat + ~~~~~~~~~~~ + + The periodic task scheduler. + +""" +from __future__ import absolute_import + +import errno +import os +import time +import shelve +import sys +import traceback + +from threading import Event, Thread + +from billiard import ensure_multiprocessing +from billiard.process import Process +from billiard.common import reset_signals +from kombu.utils import cached_property, reprcall +from kombu.utils.functional import maybe_evaluate + +from . import __version__ +from . import platforms +from . import signals +from .five import items, reraise, values, monotonic +from .schedules import maybe_schedule, crontab +from .utils.imports import instantiate +from .utils.timeutils import humanize_seconds +from .utils.log import get_logger, iter_open_logger_fds + +__all__ = ['SchedulingError', 'ScheduleEntry', 'Scheduler', + 'PersistentScheduler', 'Service', 'EmbeddedService'] + +logger = get_logger(__name__) +debug, info, error, warning = (logger.debug, logger.info, + logger.error, logger.warning) + +DEFAULT_MAX_INTERVAL = 300 # 5 minutes + + +class SchedulingError(Exception): + """An error occured while scheduling a task.""" + + +class ScheduleEntry(object): + """An entry in the scheduler. + + :keyword name: see :attr:`name`. + :keyword schedule: see :attr:`schedule`. + :keyword args: see :attr:`args`. + :keyword kwargs: see :attr:`kwargs`. + :keyword options: see :attr:`options`. + :keyword last_run_at: see :attr:`last_run_at`. + :keyword total_run_count: see :attr:`total_run_count`. + :keyword relative: Is the time relative to when the server starts? + + """ + + #: The task name + name = None + + #: The schedule (run_every/crontab) + schedule = None + + #: Positional arguments to apply. 
+ args = None + + #: Keyword arguments to apply. + kwargs = None + + #: Task execution options. + options = None + + #: The time and date of when this task was last scheduled. + last_run_at = None + + #: Total number of times this task has been scheduled. + total_run_count = 0 + + def __init__(self, name=None, task=None, last_run_at=None, + total_run_count=None, schedule=None, args=(), kwargs={}, + options={}, relative=False, app=None): + self.app = app + self.name = name + self.task = task + self.args = args + self.kwargs = kwargs + self.options = options + self.schedule = maybe_schedule(schedule, relative, app=self.app) + self.last_run_at = last_run_at or self._default_now() + self.total_run_count = total_run_count or 0 + + def _default_now(self): + return self.schedule.now() if self.schedule else self.app.now() + + def _next_instance(self, last_run_at=None): + """Return a new instance of the same class, but with + its date and count fields updated.""" + return self.__class__(**dict( + self, + last_run_at=last_run_at or self._default_now(), + total_run_count=self.total_run_count + 1, + )) + __next__ = next = _next_instance # for 2to3 + + def __reduce__(self): + return self.__class__, ( + self.name, self.task, self.last_run_at, self.total_run_count, + self.schedule, self.args, self.kwargs, self.options, + ) + + def update(self, other): + """Update values from another entry. + + Does only update "editable" fields (task, schedule, args, kwargs, + options). + + """ + self.__dict__.update({'task': other.task, 'schedule': other.schedule, + 'args': other.args, 'kwargs': other.kwargs, + 'options': other.options}) + + def is_due(self): + """See :meth:`~celery.schedule.schedule.is_due`.""" + return self.schedule.is_due(self.last_run_at) + + def __iter__(self): + return iter(items(vars(self))) + + def __repr__(self): + return '%s', entry.task, result.id) + return next_time_to_run + + def tick(self): + """Run a tick, that is one iteration of the scheduler. + + Executes all due tasks. + + """ + remaining_times = [] + try: + for entry in values(self.schedule): + next_time_to_run = self.maybe_due(entry, self.publisher) + if next_time_to_run: + remaining_times.append(next_time_to_run) + except RuntimeError: + pass + + return min(remaining_times + [self.max_interval]) + + def should_sync(self): + return ( + (not self._last_sync or + (monotonic() - self._last_sync) > self.sync_every) or + (self.sync_every_tasks and + self._tasks_since_sync >= self.sync_every_tasks) + ) + + def reserve(self, entry): + new_entry = self.schedule[entry.name] = next(entry) + return new_entry + + def apply_async(self, entry, publisher=None, **kwargs): + # Update timestamps and run counts before we actually execute, + # so we have that done if an exception is raised (doesn't schedule + # forever.) 
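+        # Illustrative sketch (editorial comment, not part of the upstream
+        # celery source): each entry passed in here is a ScheduleEntry,
+        # typically built from the CELERYBEAT_SCHEDULE setting, e.g.
+        #     CELERYBEAT_SCHEDULE = {
+        #         'add-every-30-seconds': {        # hypothetical entry name
+        #             'task': 'tasks.add',         # hypothetical task
+        #             'schedule': 30.0,            # seconds, or a crontab()
+        #             'args': (2, 2),
+        #         },
+        #     }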
+ entry = self.reserve(entry) + task = self.app.tasks.get(entry.task) + + try: + if task: + result = task.apply_async(entry.args, entry.kwargs, + publisher=publisher, + **entry.options) + else: + result = self.send_task(entry.task, entry.args, entry.kwargs, + publisher=publisher, + **entry.options) + except Exception as exc: + reraise(SchedulingError, SchedulingError( + "Couldn't apply scheduled task {0.name}: {exc}".format( + entry, exc=exc)), sys.exc_info()[2]) + finally: + self._tasks_since_sync += 1 + if self.should_sync(): + self._do_sync() + return result + + def send_task(self, *args, **kwargs): + return self.app.send_task(*args, **kwargs) + + def setup_schedule(self): + self.install_default_entries(self.data) + + def _do_sync(self): + try: + debug('beat: Synchronizing schedule...') + self.sync() + finally: + self._last_sync = monotonic() + self._tasks_since_sync = 0 + + def sync(self): + pass + + def close(self): + self.sync() + + def add(self, **kwargs): + entry = self.Entry(app=self.app, **kwargs) + self.schedule[entry.name] = entry + return entry + + def _maybe_entry(self, name, entry): + if isinstance(entry, self.Entry): + entry.app = self.app + return entry + return self.Entry(**dict(entry, name=name, app=self.app)) + + def update_from_dict(self, dict_): + self.schedule.update(dict( + (name, self._maybe_entry(name, entry)) + for name, entry in items(dict_))) + + def merge_inplace(self, b): + schedule = self.schedule + A, B = set(schedule), set(b) + + # Remove items from disk not in the schedule anymore. + for key in A ^ B: + schedule.pop(key, None) + + # Update and add new items in the schedule + for key in B: + entry = self.Entry(**dict(b[key], name=key, app=self.app)) + if schedule.get(key): + schedule[key].update(entry) + else: + schedule[key] = entry + + def _ensure_connected(self): + # callback called for each retry while the connection + # can't be established. + def _error_handler(exc, interval): + error('beat: Connection error: %s. ' + 'Trying again in %s seconds...', exc, interval) + + return self.connection.ensure_connection( + _error_handler, self.app.conf.BROKER_CONNECTION_MAX_RETRIES + ) + + def get_schedule(self): + return self.data + + def set_schedule(self, schedule): + self.data = schedule + schedule = property(get_schedule, set_schedule) + + @cached_property + def connection(self): + return self.app.connection() + + @cached_property + def publisher(self): + return self.Publisher(self._ensure_connected()) + + @property + def info(self): + return '' + + +class PersistentScheduler(Scheduler): + persistence = shelve + known_suffixes = ('', '.db', '.dat', '.bak', '.dir') + + _store = None + + def __init__(self, *args, **kwargs): + self.schedule_filename = kwargs.get('schedule_filename') + Scheduler.__init__(self, *args, **kwargs) + + def _remove_db(self): + for suffix in self.known_suffixes: + with platforms.ignore_errno(errno.ENOENT): + os.remove(self.schedule_filename + suffix) + + def _open_schedule(self): + return self.persistence.open(self.schedule_filename, writeback=True) + + def _destroy_open_corrupted_schedule(self, exc): + error('Removing corrupted schedule file %r: %r', + self.schedule_filename, exc, exc_info=True) + self._remove_db() + return self._open_schedule() + + def setup_schedule(self): + try: + self._store = self._open_schedule() + # In some cases there may be different errors from a storage + # backend for corrupted files. Example - DBPageNotFoundError + # exception from bsddb. 
In such case the file will be + # successfully opened but the error will be raised on first key + # retrieving. + self._store.keys() + except Exception as exc: + self._store = self._destroy_open_corrupted_schedule(exc) + + for _ in (1, 2): + try: + self._store['entries'] + except KeyError: + # new schedule db + try: + self._store['entries'] = {} + except KeyError as exc: + self._store = self._destroy_open_corrupted_schedule(exc) + continue + else: + if '__version__' not in self._store: + warning('DB Reset: Account for new __version__ field') + self._store.clear() # remove schedule at 2.2.2 upgrade. + elif 'tz' not in self._store: + warning('DB Reset: Account for new tz field') + self._store.clear() # remove schedule at 3.0.8 upgrade + elif 'utc_enabled' not in self._store: + warning('DB Reset: Account for new utc_enabled field') + self._store.clear() # remove schedule at 3.0.9 upgrade + break + + tz = self.app.conf.CELERY_TIMEZONE + stored_tz = self._store.get('tz') + if stored_tz is not None and stored_tz != tz: + warning('Reset: Timezone changed from %r to %r', stored_tz, tz) + self._store.clear() # Timezone changed, reset db! + utc = self.app.conf.CELERY_ENABLE_UTC + stored_utc = self._store.get('utc_enabled') + if stored_utc is not None and stored_utc != utc: + choices = {True: 'enabled', False: 'disabled'} + warning('Reset: UTC changed from %s to %s', + choices[stored_utc], choices[utc]) + self._store.clear() # UTC setting changed, reset db! + entries = self._store.setdefault('entries', {}) + self.merge_inplace(self.app.conf.CELERYBEAT_SCHEDULE) + self.install_default_entries(self.schedule) + self._store.update(__version__=__version__, tz=tz, utc_enabled=utc) + self.sync() + debug('Current schedule:\n' + '\n'.join( + repr(entry) for entry in values(entries))) + + def get_schedule(self): + return self._store['entries'] + + def set_schedule(self, schedule): + self._store['entries'] = schedule + schedule = property(get_schedule, set_schedule) + + def sync(self): + if self._store is not None: + self._store.sync() + + def close(self): + self.sync() + self._store.close() + + @property + def info(self): + return ' . 
db -> {self.schedule_filename}'.format(self=self) + + +class Service(object): + scheduler_cls = PersistentScheduler + + def __init__(self, app, max_interval=None, schedule_filename=None, + scheduler_cls=None): + self.app = app + self.max_interval = (max_interval or + app.conf.CELERYBEAT_MAX_LOOP_INTERVAL) + self.scheduler_cls = scheduler_cls or self.scheduler_cls + self.schedule_filename = ( + schedule_filename or app.conf.CELERYBEAT_SCHEDULE_FILENAME) + + self._is_shutdown = Event() + self._is_stopped = Event() + + def __reduce__(self): + return self.__class__, (self.max_interval, self.schedule_filename, + self.scheduler_cls, self.app) + + def start(self, embedded_process=False, drift=-0.010): + info('beat: Starting...') + debug('beat: Ticking with max interval->%s', + humanize_seconds(self.scheduler.max_interval)) + + signals.beat_init.send(sender=self) + if embedded_process: + signals.beat_embedded_init.send(sender=self) + platforms.set_process_title('celery beat') + + try: + while not self._is_shutdown.is_set(): + interval = self.scheduler.tick() + interval = interval + drift if interval else interval + if interval and interval > 0: + debug('beat: Waking up %s.', + humanize_seconds(interval, prefix='in ')) + time.sleep(interval) + if self.scheduler.should_sync(): + self.scheduler._do_sync() + except (KeyboardInterrupt, SystemExit): + self._is_shutdown.set() + finally: + self.sync() + + def sync(self): + self.scheduler.close() + self._is_stopped.set() + + def stop(self, wait=False): + info('beat: Shutting down...') + self._is_shutdown.set() + wait and self._is_stopped.wait() # block until shutdown done. + + def get_scheduler(self, lazy=False): + filename = self.schedule_filename + scheduler = instantiate(self.scheduler_cls, + app=self.app, + schedule_filename=filename, + max_interval=self.max_interval, + lazy=lazy) + return scheduler + + @cached_property + def scheduler(self): + return self.get_scheduler() + + +class _Threaded(Thread): + """Embedded task scheduler using threading.""" + + def __init__(self, app, **kwargs): + super(_Threaded, self).__init__() + self.app = app + self.service = Service(app, **kwargs) + self.daemon = True + self.name = 'Beat' + + def run(self): + self.app.set_current() + self.service.start() + + def stop(self): + self.service.stop(wait=True) + + +try: + ensure_multiprocessing() +except NotImplementedError: # pragma: no cover + _Process = None +else: + class _Process(Process): # noqa + + def __init__(self, app, **kwargs): + super(_Process, self).__init__() + self.app = app + self.service = Service(app, **kwargs) + self.name = 'Beat' + + def run(self): + reset_signals(full=False) + platforms.close_open_fds([ + sys.__stdin__, sys.__stdout__, sys.__stderr__, + ] + list(iter_open_logger_fds())) + self.app.set_default() + self.app.set_current() + self.service.start(embedded_process=True) + + def stop(self): + self.service.stop() + self.terminate() + + +def EmbeddedService(app, max_interval=None, **kwargs): + """Return embedded clock service. + + :keyword thread: Run threaded instead of as a separate process. + Uses :mod:`multiprocessing` by default, if available. + + """ + if kwargs.pop('thread', False) or _Process is None: + # Need short max interval to be able to stop thread + # in reasonable time. 
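+        # Usage sketch (editorial comment, not part of the upstream celery
+        # source), assuming an existing `app` instance:
+        #     beat = EmbeddedService(app, thread=True)
+        #     beat.start()   # Thread.start(); runs Service.start() in a daemon thread
+        #     ...
+        #     beat.stop()    # calls Service.stop(wait=True)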
+ return _Threaded(app, max_interval=1, **kwargs) + return _Process(app, max_interval=max_interval, **kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/bin/__init__.py new file mode 100644 index 0000000..3f44b50 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/bin/__init__.py @@ -0,0 +1,5 @@ +from __future__ import absolute_import + +from .base import Option + +__all__ = ['Option'] diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/amqp.py b/thesisenv/lib/python3.6/site-packages/celery/bin/amqp.py new file mode 100644 index 0000000..ce3b351 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/bin/amqp.py @@ -0,0 +1,380 @@ +# -*- coding: utf-8 -*- +""" +The :program:`celery amqp` command. + +.. program:: celery amqp + +""" +from __future__ import absolute_import, print_function, unicode_literals + +import cmd +import sys +import shlex +import pprint + +from functools import partial +from itertools import count + +from kombu.utils.encoding import safe_str + +from celery.utils.functional import padlist + +from celery.bin.base import Command +from celery.five import string_t +from celery.utils import strtobool + +__all__ = ['AMQPAdmin', 'AMQShell', 'Spec', 'amqp'] + +# Map to coerce strings to other types. +COERCE = {bool: strtobool} + +HELP_HEADER = """ +Commands +-------- +""".rstrip() + +EXAMPLE_TEXT = """ +Example: + -> queue.delete myqueue yes no +""" + +say = partial(print, file=sys.stderr) + + +class Spec(object): + """AMQP Command specification. + + Used to convert arguments to Python values and display various help + and tooltips. + + :param args: see :attr:`args`. + :keyword returns: see :attr:`returns`. + + .. attribute args:: + + List of arguments this command takes. Should + contain `(argument_name, argument_type)` tuples. + + .. attribute returns: + + Helpful human string representation of what this command returns. + May be :const:`None`, to signify the return type is unknown. + + """ + def __init__(self, *args, **kwargs): + self.args = args + self.returns = kwargs.get('returns') + + def coerce(self, index, value): + """Coerce value for argument at index.""" + arg_info = self.args[index] + arg_type = arg_info[1] + # Might be a custom way to coerce the string value, + # so look in the coercion map. + return COERCE.get(arg_type, arg_type)(value) + + def str_args_to_python(self, arglist): + """Process list of string arguments to values according to spec. + + e.g: + + >>> spec = Spec([('queue', str), ('if_unused', bool)]) + >>> spec.str_args_to_python('pobox', 'true') + ('pobox', True) + + """ + return tuple( + self.coerce(index, value) for index, value in enumerate(arglist)) + + def format_response(self, response): + """Format the return value of this command in a human-friendly way.""" + if not self.returns: + return 'ok.' if response is None else response + if callable(self.returns): + return self.returns(response) + return self.returns.format(response) + + def format_arg(self, name, type, default_value=None): + if default_value is not None: + return '{0}:{1}'.format(name, default_value) + return name + + def format_signature(self): + return ' '.join(self.format_arg(*padlist(list(arg), 3)) + for arg in self.args) + + +def dump_message(message): + if message is None: + return 'No messages in queue. basic.publish something.' 
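+    # Illustrative note (editorial comment, not part of the upstream celery
+    # source): this is the `returns` formatter for `basic.get`, so a shell
+    # command such as `1> basic.get myqueue no` ends up displayed as the
+    # dict built below (body / properties / delivery_info).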
+ return {'body': message.body, + 'properties': message.properties, + 'delivery_info': message.delivery_info} + + +def format_declare_queue(ret): + return 'ok. queue:{0} messages:{1} consumers:{2}.'.format(*ret) + + +class AMQShell(cmd.Cmd): + """AMQP API Shell. + + :keyword connect: Function used to connect to the server, must return + connection object. + + :keyword silent: If :const:`True`, the commands won't have annoying + output not relevant when running in non-shell mode. + + + .. attribute: builtins + + Mapping of built-in command names -> method names + + .. attribute:: amqp + + Mapping of AMQP API commands and their :class:`Spec`. + + """ + conn = None + chan = None + prompt_fmt = '{self.counter}> ' + identchars = cmd.IDENTCHARS = '.' + needs_reconnect = False + counter = 1 + inc_counter = count(2) + + builtins = {'EOF': 'do_exit', + 'exit': 'do_exit', + 'help': 'do_help'} + + amqp = { + 'exchange.declare': Spec(('exchange', str), + ('type', str), + ('passive', bool, 'no'), + ('durable', bool, 'no'), + ('auto_delete', bool, 'no'), + ('internal', bool, 'no')), + 'exchange.delete': Spec(('exchange', str), + ('if_unused', bool)), + 'queue.bind': Spec(('queue', str), + ('exchange', str), + ('routing_key', str)), + 'queue.declare': Spec(('queue', str), + ('passive', bool, 'no'), + ('durable', bool, 'no'), + ('exclusive', bool, 'no'), + ('auto_delete', bool, 'no'), + returns=format_declare_queue), + 'queue.delete': Spec(('queue', str), + ('if_unused', bool, 'no'), + ('if_empty', bool, 'no'), + returns='ok. {0} messages deleted.'), + 'queue.purge': Spec(('queue', str), + returns='ok. {0} messages deleted.'), + 'basic.get': Spec(('queue', str), + ('no_ack', bool, 'off'), + returns=dump_message), + 'basic.publish': Spec(('msg', str), + ('exchange', str), + ('routing_key', str), + ('mandatory', bool, 'no'), + ('immediate', bool, 'no')), + 'basic.ack': Spec(('delivery_tag', int)), + } + + def _prepare_spec(self, conn): + # XXX Hack to fix Issue #2013 + from amqp import Connection, Message + if isinstance(conn.connection, Connection): + self.amqp['basic.publish'] = Spec(('msg', Message), + ('exchange', str), + ('routing_key', str), + ('mandatory', bool, 'no'), + ('immediate', bool, 'no')) + + def __init__(self, *args, **kwargs): + self.connect = kwargs.pop('connect') + self.silent = kwargs.pop('silent', False) + self.out = kwargs.pop('out', sys.stderr) + cmd.Cmd.__init__(self, *args, **kwargs) + self._reconnect() + + def note(self, m): + """Say something to the user. Disabled if :attr:`silent`.""" + if not self.silent: + say(m, file=self.out) + + def say(self, m): + say(m, file=self.out) + + def get_amqp_api_command(self, cmd, arglist): + """With a command name and a list of arguments, convert the arguments + to Python values and find the corresponding method on the AMQP channel + object. + + :returns: tuple of `(method, processed_args)`. 
+ + """ + spec = self.amqp[cmd] + args = spec.str_args_to_python(arglist) + attr_name = cmd.replace('.', '_') + if self.needs_reconnect: + self._reconnect() + return getattr(self.chan, attr_name), args, spec.format_response + + def do_exit(self, *args): + """The `'exit'` command.""" + self.note("\n-> please, don't leave!") + sys.exit(0) + + def display_command_help(self, cmd, short=False): + spec = self.amqp[cmd] + self.say('{0} {1}'.format(cmd, spec.format_signature())) + + def do_help(self, *args): + if not args: + self.say(HELP_HEADER) + for cmd_name in self.amqp: + self.display_command_help(cmd_name, short=True) + self.say(EXAMPLE_TEXT) + else: + self.display_command_help(args[0]) + + def default(self, line): + self.say("unknown syntax: {0!r}. how about some 'help'?".format(line)) + + def get_names(self): + return set(self.builtins) | set(self.amqp) + + def completenames(self, text, *ignored): + """Return all commands starting with `text`, for tab-completion.""" + names = self.get_names() + first = [cmd for cmd in names + if cmd.startswith(text.replace('_', '.'))] + if first: + return first + return [cmd for cmd in names + if cmd.partition('.')[2].startswith(text)] + + def dispatch(self, cmd, argline): + """Dispatch and execute the command. + + Lookup order is: :attr:`builtins` -> :attr:`amqp`. + + """ + arglist = shlex.split(safe_str(argline)) + if cmd in self.builtins: + return getattr(self, self.builtins[cmd])(*arglist) + fun, args, formatter = self.get_amqp_api_command(cmd, arglist) + return formatter(fun(*args)) + + def parseline(self, line): + """Parse input line. + + :returns: tuple of three items: + `(command_name, arglist, original_line)` + + """ + parts = line.split() + if parts: + return parts[0], ' '.join(parts[1:]), line + return '', '', line + + def onecmd(self, line): + """Parse line and execute command.""" + cmd, arg, line = self.parseline(line) + if not line: + return self.emptyline() + self.lastcmd = line + self.counter = next(self.inc_counter) + try: + self.respond(self.dispatch(cmd, arg)) + except (AttributeError, KeyError) as exc: + self.default(line) + except Exception as exc: + self.say(exc) + self.needs_reconnect = True + + def respond(self, retval): + """What to do with the return value of a command.""" + if retval is not None: + if isinstance(retval, string_t): + self.say(retval) + else: + self.say(pprint.pformat(retval)) + + def _reconnect(self): + """Re-establish connection to the AMQP server.""" + self.conn = self.connect(self.conn) + self._prepare_spec(self.conn) + self.chan = self.conn.default_channel + self.needs_reconnect = False + + @property + def prompt(self): + return self.prompt_fmt.format(self=self) + + +class AMQPAdmin(object): + """The celery :program:`celery amqp` utility.""" + Shell = AMQShell + + def __init__(self, *args, **kwargs): + self.app = kwargs['app'] + self.out = kwargs.setdefault('out', sys.stderr) + self.silent = kwargs.get('silent') + self.args = args + + def connect(self, conn=None): + if conn: + conn.close() + conn = self.app.connection() + self.note('-> connecting to {0}.'.format(conn.as_uri())) + conn.connect() + self.note('-> connected.') + return conn + + def run(self): + shell = self.Shell(connect=self.connect, out=self.out) + if self.args: + return shell.onecmd(' '.join(self.args)) + try: + return shell.cmdloop() + except KeyboardInterrupt: + self.note('(bibi)') + pass + + def note(self, m): + if not self.silent: + say(m, file=self.out) + + +class amqp(Command): + """AMQP Administration Shell. 
+ + Also works for non-amqp transports (but not ones that + store declarations in memory). + + Examples:: + + celery amqp + start shell mode + celery amqp help + show list of commands + + celery amqp exchange.delete name + celery amqp queue.delete queue + celery amqp queue.delete queue yes yes + + """ + + def run(self, *args, **options): + options['app'] = self.app + return AMQPAdmin(*args, **options).run() + + +def main(): + amqp().execute_from_commandline() + +if __name__ == '__main__': # pragma: no cover + main() diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/base.py b/thesisenv/lib/python3.6/site-packages/celery/bin/base.py new file mode 100644 index 0000000..9044b7b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/bin/base.py @@ -0,0 +1,668 @@ +# -*- coding: utf-8 -*- +""" + +.. _preload-options: + +Preload Options +--------------- + +These options are supported by all commands, +and usually parsed before command-specific arguments. + +.. cmdoption:: -A, --app + + app instance to use (e.g. module.attr_name) + +.. cmdoption:: -b, --broker + + url to broker. default is 'amqp://guest@localhost//' + +.. cmdoption:: --loader + + name of custom loader class to use. + +.. cmdoption:: --config + + Name of the configuration module + +.. _daemon-options: + +Daemon Options +-------------- + +These options are supported by commands that can detach +into the background (daemon). They will be present +in any command that also has a `--detach` option. + +.. cmdoption:: -f, --logfile + + Path to log file. If no logfile is specified, `stderr` is used. + +.. cmdoption:: --pidfile + + Optional file used to store the process pid. + + The program will not start if this file already exists + and the pid is still alive. + +.. cmdoption:: --uid + + User id, or user name of the user to run as after detaching. + +.. cmdoption:: --gid + + Group id, or group name of the main group to change to after + detaching. + +.. cmdoption:: --umask + + Effective umask (in octal) of the process after detaching. Inherits + the umask of the parent process by default. + +.. cmdoption:: --workdir + + Optional directory to change to after detaching. + +.. cmdoption:: --executable + + Executable to use for the detached process. + +""" +from __future__ import absolute_import, print_function, unicode_literals + +import os +import random +import re +import sys +import warnings +import json + +from collections import defaultdict +from heapq import heappush +from inspect import getargspec +from optparse import OptionParser, IndentedHelpFormatter, make_option as Option +from pprint import pformat + +from celery import VERSION_BANNER, Celery, maybe_patch_concurrency +from celery import signals +from celery.exceptions import CDeprecationWarning, CPendingDeprecationWarning +from celery.five import items, string, string_t +from celery.platforms import EX_FAILURE, EX_OK, EX_USAGE +from celery.utils import term +from celery.utils import text +from celery.utils import node_format, host_format +from celery.utils.imports import symbol_by_name, import_from_cwd + +try: + input = raw_input +except NameError: + pass + +# always enable DeprecationWarnings, so our users can see them. +for warning in (CDeprecationWarning, CPendingDeprecationWarning): + warnings.simplefilter('once', warning, 0) + +ARGV_DISABLED = """ +Unrecognized command-line arguments: {0} + +Try --help? 
+""" + +find_long_opt = re.compile(r'.+?(--.+?)(?:\s|,|$)') +find_rst_ref = re.compile(r':\w+:`(.+?)`') + +__all__ = ['Error', 'UsageError', 'Extensions', 'HelpFormatter', + 'Command', 'Option', 'daemon_options'] + + +class Error(Exception): + status = EX_FAILURE + + def __init__(self, reason, status=None): + self.reason = reason + self.status = status if status is not None else self.status + super(Error, self).__init__(reason, status) + + def __str__(self): + return self.reason + __unicode__ = __str__ + + +class UsageError(Error): + status = EX_USAGE + + +class Extensions(object): + + def __init__(self, namespace, register): + self.names = [] + self.namespace = namespace + self.register = register + + def add(self, cls, name): + heappush(self.names, name) + self.register(cls, name=name) + + def load(self): + try: + from pkg_resources import iter_entry_points + except ImportError: # pragma: no cover + return + + for ep in iter_entry_points(self.namespace): + sym = ':'.join([ep.module_name, ep.attrs[0]]) + try: + cls = symbol_by_name(sym) + except (ImportError, SyntaxError) as exc: + warnings.warn( + 'Cannot load extension {0!r}: {1!r}'.format(sym, exc)) + else: + self.add(cls, ep.name) + return self.names + + +class HelpFormatter(IndentedHelpFormatter): + + def format_epilog(self, epilog): + if epilog: + return '\n{0}\n\n'.format(epilog) + return '' + + def format_description(self, description): + return text.ensure_2lines(text.fill_paragraphs( + text.dedent(description), self.width)) + + +class Command(object): + """Base class for command-line applications. + + :keyword app: The current app. + :keyword get_app: Callable returning the current app if no app provided. + + """ + Error = Error + UsageError = UsageError + Parser = OptionParser + + #: Arg list used in help. + args = '' + + #: Application version. + version = VERSION_BANNER + + #: If false the parser will raise an exception if positional + #: args are provided. + supports_args = True + + #: List of options (without preload options). + option_list = () + + # module Rst documentation to parse help from (if any) + doc = None + + # Some programs (multi) does not want to load the app specified + # (Issue #1008). + respects_app_option = True + + #: List of options to parse before parsing other options. + preload_options = ( + Option('-A', '--app', default=None), + Option('-b', '--broker', default=None), + Option('--loader', default=None), + Option('--config', default=None), + Option('--workdir', default=None, dest='working_directory'), + Option('--no-color', '-C', action='store_true', default=None), + Option('--quiet', '-q', action='store_true'), + ) + + #: Enable if the application should support config from the cmdline. + enable_config_from_cmdline = False + + #: Default configuration namespace. + namespace = 'celery' + + #: Text to print at end of --help + epilog = None + + #: Text to print in --help before option list. + description = '' + + #: Set to true if this command doesn't have subcommands + leaf = True + + # used by :meth:`say_remote_command_reply`. + show_body = True + # used by :meth:`say_chat`. 
+ show_reply = True + + prog_name = 'celery' + + def __init__(self, app=None, get_app=None, no_color=False, + stdout=None, stderr=None, quiet=False, on_error=None, + on_usage_error=None): + self.app = app + self.get_app = get_app or self._get_default_app + self.stdout = stdout or sys.stdout + self.stderr = stderr or sys.stderr + self._colored = None + self._no_color = no_color + self.quiet = quiet + if not self.description: + self.description = self.__doc__ + if on_error: + self.on_error = on_error + if on_usage_error: + self.on_usage_error = on_usage_error + + def run(self, *args, **options): + """This is the body of the command called by :meth:`handle_argv`.""" + raise NotImplementedError('subclass responsibility') + + def on_error(self, exc): + self.error(self.colored.red('Error: {0}'.format(exc))) + + def on_usage_error(self, exc): + self.handle_error(exc) + + def on_concurrency_setup(self): + pass + + def __call__(self, *args, **kwargs): + random.seed() # maybe we were forked. + self.verify_args(args) + try: + ret = self.run(*args, **kwargs) + return ret if ret is not None else EX_OK + except self.UsageError as exc: + self.on_usage_error(exc) + return exc.status + except self.Error as exc: + self.on_error(exc) + return exc.status + + def verify_args(self, given, _index=0): + S = getargspec(self.run) + _index = 1 if S.args and S.args[0] == 'self' else _index + required = S.args[_index:-len(S.defaults) if S.defaults else None] + missing = required[len(given):] + if missing: + raise self.UsageError('Missing required {0}: {1}'.format( + text.pluralize(len(missing), 'argument'), + ', '.join(missing) + )) + + def execute_from_commandline(self, argv=None): + """Execute application from command-line. + + :keyword argv: The list of command-line arguments. + Defaults to ``sys.argv``. + + """ + if argv is None: + argv = list(sys.argv) + # Should we load any special concurrency environment? + self.maybe_patch_concurrency(argv) + self.on_concurrency_setup() + + # Dump version and exit if '--version' arg set. + self.early_version(argv) + argv = self.setup_app_from_commandline(argv) + self.prog_name = os.path.basename(argv[0]) + return self.handle_argv(self.prog_name, argv[1:]) + + def run_from_argv(self, prog_name, argv=None, command=None): + return self.handle_argv(prog_name, + sys.argv if argv is None else argv, command) + + def maybe_patch_concurrency(self, argv=None): + argv = argv or sys.argv + pool_option = self.with_pool_option(argv) + if pool_option: + maybe_patch_concurrency(argv, *pool_option) + short_opts, long_opts = pool_option + + def usage(self, command): + return '%prog {0} [options] {self.args}'.format(command, self=self) + + def get_options(self): + """Get supported command-line options.""" + return self.option_list + + def expanduser(self, value): + if isinstance(value, string_t): + return os.path.expanduser(value) + return value + + def ask(self, q, choices, default=None): + """Prompt user to choose from a tuple of string values. + + :param q: the question to ask (do not include questionark) + :param choice: tuple of possible choices, must be lowercase. + :param default: Default value if any. + + If a default is not specified the question will be repeated + until the user gives a valid choice. + + Matching is done case insensitively. + + """ + schoices = choices + if default is not None: + schoices = [c.upper() if c == default else c.lower() + for c in choices] + schoices = '/'.join(schoices) + + p = '{0} ({1})? 
'.format(q.capitalize(), schoices) + while 1: + val = input(p).lower() + if val in choices: + return val + elif default is not None: + break + return default + + def handle_argv(self, prog_name, argv, command=None): + """Parse command-line arguments from ``argv`` and dispatch + to :meth:`run`. + + :param prog_name: The program name (``argv[0]``). + :param argv: Command arguments. + + Exits with an error message if :attr:`supports_args` is disabled + and ``argv`` contains positional arguments. + + """ + options, args = self.prepare_args( + *self.parse_options(prog_name, argv, command)) + return self(*args, **options) + + def prepare_args(self, options, args): + if options: + options = dict((k, self.expanduser(v)) + for k, v in items(vars(options)) + if not k.startswith('_')) + args = [self.expanduser(arg) for arg in args] + self.check_args(args) + return options, args + + def check_args(self, args): + if not self.supports_args and args: + self.die(ARGV_DISABLED.format(', '.join(args)), EX_USAGE) + + def error(self, s): + self.out(s, fh=self.stderr) + + def out(self, s, fh=None): + print(s, file=fh or self.stdout) + + def die(self, msg, status=EX_FAILURE): + self.error(msg) + sys.exit(status) + + def early_version(self, argv): + if '--version' in argv: + print(self.version, file=self.stdout) + sys.exit(0) + + def parse_options(self, prog_name, arguments, command=None): + """Parse the available options.""" + # Don't want to load configuration to just print the version, + # so we handle --version manually here. + self.parser = self.create_parser(prog_name, command) + return self.parser.parse_args(arguments) + + def create_parser(self, prog_name, command=None): + option_list = ( + self.preload_options + + self.get_options() + + tuple(self.app.user_options['preload']) + ) + return self.prepare_parser(self.Parser( + prog=prog_name, + usage=self.usage(command), + version=self.version, + epilog=self.epilog, + formatter=HelpFormatter(), + description=self.description, + option_list=option_list, + )) + + def prepare_parser(self, parser): + docs = [self.parse_doc(doc) for doc in (self.doc, __doc__) if doc] + for doc in docs: + for long_opt, help in items(doc): + option = parser.get_option(long_opt) + if option is not None: + option.help = ' '.join(help).format(default=option.default) + return parser + + def setup_app_from_commandline(self, argv): + preload_options = self.parse_preload_options(argv) + quiet = preload_options.get('quiet') + if quiet is not None: + self.quiet = quiet + try: + self.no_color = preload_options['no_color'] + except KeyError: + pass + workdir = preload_options.get('working_directory') + if workdir: + os.chdir(workdir) + app = (preload_options.get('app') or + os.environ.get('CELERY_APP') or + self.app) + preload_loader = preload_options.get('loader') + if preload_loader: + # Default app takes loader from this env (Issue #1066). 
+ os.environ['CELERY_LOADER'] = preload_loader + loader = (preload_loader, + os.environ.get('CELERY_LOADER') or + 'default') + broker = preload_options.get('broker', None) + if broker: + os.environ['CELERY_BROKER_URL'] = broker + config = preload_options.get('config') + if config: + os.environ['CELERY_CONFIG_MODULE'] = config + if self.respects_app_option: + if app: + self.app = self.find_app(app) + elif self.app is None: + self.app = self.get_app(loader=loader) + if self.enable_config_from_cmdline: + argv = self.process_cmdline_config(argv) + else: + self.app = Celery(fixups=[]) + + user_preload = tuple(self.app.user_options['preload'] or ()) + if user_preload: + user_options = self.preparse_options(argv, user_preload) + for user_option in user_preload: + user_options.setdefault(user_option.dest, user_option.default) + signals.user_preload_options.send( + sender=self, app=self.app, options=user_options, + ) + return argv + + def find_app(self, app): + from celery.app.utils import find_app + return find_app(app, symbol_by_name=self.symbol_by_name) + + def symbol_by_name(self, name, imp=import_from_cwd): + return symbol_by_name(name, imp=imp) + get_cls_by_name = symbol_by_name # XXX compat + + def process_cmdline_config(self, argv): + try: + cargs_start = argv.index('--') + except ValueError: + return argv + argv, cargs = argv[:cargs_start], argv[cargs_start + 1:] + self.app.config_from_cmdline(cargs, namespace=self.namespace) + return argv + + def parse_preload_options(self, args): + return self.preparse_options(args, self.preload_options) + + def add_append_opt(self, acc, opt, value): + acc.setdefault(opt.dest, opt.default or []) + acc[opt.dest].append(value) + + def preparse_options(self, args, options): + acc = {} + opts = {} + for opt in options: + for t in (opt._long_opts, opt._short_opts): + opts.update(dict(zip(t, [opt] * len(t)))) + index = 0 + length = len(args) + while index < length: + arg = args[index] + if arg.startswith('--'): + if '=' in arg: + key, value = arg.split('=', 1) + opt = opts.get(key) + if opt: + if opt.action == 'append': + self.add_append_opt(acc, opt, value) + else: + acc[opt.dest] = value + else: + opt = opts.get(arg) + if opt and opt.takes_value(): + # optparse also supports ['--opt', 'value'] + # (Issue #1668) + if opt.action == 'append': + self.add_append_opt(acc, opt, args[index + 1]) + else: + acc[opt.dest] = args[index + 1] + index += 1 + elif opt and opt.action == 'store_true': + acc[opt.dest] = True + elif arg.startswith('-'): + opt = opts.get(arg) + if opt: + if opt.takes_value(): + try: + acc[opt.dest] = args[index + 1] + except IndexError: + raise ValueError( + 'Missing required argument for {0}'.format( + arg)) + index += 1 + elif opt.action == 'store_true': + acc[opt.dest] = True + index += 1 + return acc + + def parse_doc(self, doc): + options, in_option = defaultdict(list), None + for line in doc.splitlines(): + if line.startswith('.. cmdoption::'): + m = find_long_opt.match(line) + if m: + in_option = m.groups()[0].strip() + assert in_option, 'missing long opt' + elif in_option and line.startswith(' ' * 4): + options[in_option].append( + find_rst_ref.sub(r'\1', line.strip()).replace('`', '')) + return options + + def with_pool_option(self, argv): + """Return tuple of ``(short_opts, long_opts)`` if the command + supports a pool argument, and used to monkey patch eventlet/gevent + environments as early as possible. 
+ + E.g:: + has_pool_option = (['-P'], ['--pool']) + """ + pass + + def node_format(self, s, nodename, **extra): + return node_format(s, nodename, **extra) + + def host_format(self, s, **extra): + return host_format(s, **extra) + + def _get_default_app(self, *args, **kwargs): + from celery._state import get_current_app + return get_current_app() # omit proxy + + def pretty_list(self, n): + c = self.colored + if not n: + return '- empty -' + return '\n'.join( + str(c.reset(c.white('*'), ' {0}'.format(item))) for item in n + ) + + def pretty_dict_ok_error(self, n): + c = self.colored + try: + return (c.green('OK'), + text.indent(self.pretty(n['ok'])[1], 4)) + except KeyError: + pass + return (c.red('ERROR'), + text.indent(self.pretty(n['error'])[1], 4)) + + def say_remote_command_reply(self, replies): + c = self.colored + node = next(iter(replies)) # <-- take first. + reply = replies[node] + status, preply = self.pretty(reply) + self.say_chat('->', c.cyan(node, ': ') + status, + text.indent(preply, 4) if self.show_reply else '') + + def pretty(self, n): + OK = str(self.colored.green('OK')) + if isinstance(n, list): + return OK, self.pretty_list(n) + if isinstance(n, dict): + if 'ok' in n or 'error' in n: + return self.pretty_dict_ok_error(n) + else: + return OK, json.dumps(n, sort_keys=True, indent=4) + if isinstance(n, string_t): + return OK, string(n) + return OK, pformat(n) + + def say_chat(self, direction, title, body=''): + c = self.colored + if direction == '<-' and self.quiet: + return + dirstr = not self.quiet and c.bold(c.white(direction), ' ') or '' + self.out(c.reset(dirstr, title)) + if body and self.show_body: + self.out(body) + + @property + def colored(self): + if self._colored is None: + self._colored = term.colored(enabled=not self.no_color) + return self._colored + + @colored.setter + def colored(self, obj): + self._colored = obj + + @property + def no_color(self): + return self._no_color + + @no_color.setter + def no_color(self, value): + self._no_color = value + if self._colored is not None: + self._colored.enabled = not self._no_color + + +def daemon_options(default_pidfile=None, default_logfile=None): + return ( + Option('-f', '--logfile', default=default_logfile), + Option('--pidfile', default=default_pidfile), + Option('--uid', default=None), + Option('--gid', default=None), + Option('--umask', default=None), + Option('--executable', default=None), + ) diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/beat.py b/thesisenv/lib/python3.6/site-packages/celery/bin/beat.py new file mode 100644 index 0000000..4bcbc62 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/bin/beat.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +""" + +The :program:`celery beat` command. + +.. program:: celery beat + +.. seealso:: + + See :ref:`preload-options` and :ref:`daemon-options`. + +.. cmdoption:: --detach + + Detach and run in the background as a daemon. + +.. cmdoption:: -s, --schedule + + Path to the schedule database. Defaults to `celerybeat-schedule`. + The extension '.db' may be appended to the filename. + Default is {default}. + +.. cmdoption:: -S, --scheduler + + Scheduler class to use. + Default is :class:`celery.beat.PersistentScheduler`. + +.. cmdoption:: --max-interval + + Max seconds to sleep between schedule iterations. + +.. cmdoption:: -f, --logfile + + Path to log file. If no logfile is specified, `stderr` is used. + +.. cmdoption:: -l, --loglevel + + Logging level, choose between `DEBUG`, `INFO`, `WARNING`, + `ERROR`, `CRITICAL`, or `FATAL`. 
+ +""" +from __future__ import absolute_import + +from functools import partial + +from celery.platforms import detached, maybe_drop_privileges + +from celery.bin.base import Command, Option, daemon_options + +__all__ = ['beat'] + + +class beat(Command): + """Start the beat periodic task scheduler. + + Examples:: + + celery beat -l info + celery beat -s /var/run/celery/beat-schedule --detach + celery beat -S djcelery.schedulers.DatabaseScheduler + + """ + doc = __doc__ + enable_config_from_cmdline = True + supports_args = False + + def run(self, detach=False, logfile=None, pidfile=None, uid=None, + gid=None, umask=None, working_directory=None, **kwargs): + if not detach: + maybe_drop_privileges(uid=uid, gid=gid) + workdir = working_directory + kwargs.pop('app', None) + beat = partial(self.app.Beat, + logfile=logfile, pidfile=pidfile, **kwargs) + + if detach: + with detached(logfile, pidfile, uid, gid, umask, workdir): + return beat().run() + else: + return beat().run() + + def get_options(self): + c = self.app.conf + + return ( + (Option('--detach', action='store_true'), + Option('-s', '--schedule', + default=c.CELERYBEAT_SCHEDULE_FILENAME), + Option('--max-interval', type='float'), + Option('-S', '--scheduler', dest='scheduler_cls'), + Option('-l', '--loglevel', default=c.CELERYBEAT_LOG_LEVEL)) + + daemon_options(default_pidfile='celerybeat.pid') + + tuple(self.app.user_options['beat']) + ) + + +def main(app=None): + beat(app=app).execute_from_commandline() + +if __name__ == '__main__': # pragma: no cover + main() diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/celery.py b/thesisenv/lib/python3.6/site-packages/celery/bin/celery.py new file mode 100644 index 0000000..4676b30 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/bin/celery.py @@ -0,0 +1,850 @@ +# -*- coding: utf-8 -*- +""" + +The :program:`celery` umbrella command. + +.. program:: celery + +""" +from __future__ import absolute_import, unicode_literals + +import anyjson +import numbers +import os +import sys + +from functools import partial +from importlib import import_module + +from celery.five import string_t, values +from celery.platforms import EX_OK, EX_FAILURE, EX_UNAVAILABLE, EX_USAGE +from celery.utils import term +from celery.utils import text +from celery.utils.timeutils import maybe_iso8601 + +# Cannot use relative imports here due to a Windows issue (#1111). +from celery.bin.base import Command, Option, Extensions + +# Import commands from other modules +from celery.bin.amqp import amqp +from celery.bin.beat import beat +from celery.bin.events import events +from celery.bin.graph import graph +from celery.bin.worker import worker + +__all__ = ['CeleryCommand', 'main'] + +HELP = """ +---- -- - - ---- Commands- -------------- --- ------------ + +{commands} +---- -- - - --------- -- - -------------- --- ------------ + +Type '{prog_name} --help' for help using a specific command. 
+""" + +MIGRATE_PROGRESS_FMT = """\ +Migrating task {state.count}/{state.strtotal}: \ +{body[task]}[{body[id]}]\ +""" + +DEBUG = os.environ.get('C_DEBUG', False) + +command_classes = [ + ('Main', ['worker', 'events', 'beat', 'shell', 'multi', 'amqp'], 'green'), + ('Remote Control', ['status', 'inspect', 'control'], 'blue'), + ('Utils', ['purge', 'list', 'migrate', 'call', 'result', 'report'], None), +] +if DEBUG: # pragma: no cover + command_classes.append( + ('Debug', ['graph'], 'red'), + ) + + +def determine_exit_status(ret): + if isinstance(ret, numbers.Integral): + return ret + return EX_OK if ret else EX_FAILURE + + +def main(argv=None): + # Fix for setuptools generated scripts, so that it will + # work with multiprocessing fork emulation. + # (see multiprocessing.forking.get_preparation_data()) + try: + if __name__ != '__main__': # pragma: no cover + sys.modules['__main__'] = sys.modules[__name__] + cmd = CeleryCommand() + cmd.maybe_patch_concurrency() + from billiard import freeze_support + freeze_support() + cmd.execute_from_commandline(argv) + except KeyboardInterrupt: + pass + + +class multi(Command): + """Start multiple worker instances.""" + respects_app_option = False + + def get_options(self): + return () + + def run_from_argv(self, prog_name, argv, command=None): + from celery.bin.multi import MultiTool + multi = MultiTool(quiet=self.quiet, no_color=self.no_color) + return multi.execute_from_commandline( + [command] + argv, prog_name, + ) + + +class list_(Command): + """Get info from broker. + + Examples:: + + celery list bindings + + NOTE: For RabbitMQ the management plugin is required. + """ + args = '[bindings]' + + def list_bindings(self, management): + try: + bindings = management.get_bindings() + except NotImplementedError: + raise self.Error('Your transport cannot list bindings.') + + def fmt(q, e, r): + return self.out('{0:<28} {1:<28} {2}'.format(q, e, r)) + fmt('Queue', 'Exchange', 'Routing Key') + fmt('-' * 16, '-' * 16, '-' * 16) + for b in bindings: + fmt(b['destination'], b['source'], b['routing_key']) + + def run(self, what=None, *_, **kw): + topics = {'bindings': self.list_bindings} + available = ', '.join(topics) + if not what: + raise self.UsageError( + 'You must specify one of {0}'.format(available)) + if what not in topics: + raise self.UsageError( + 'unknown topic {0!r} (choose one of: {1})'.format( + what, available)) + with self.app.connection() as conn: + self.app.amqp.TaskConsumer(conn).declare() + topics[what](conn.manager) + + +class call(Command): + """Call a task by name. + + Examples:: + + celery call tasks.add --args='[2, 2]' + celery call tasks.add --args='[2, 2]' --countdown=10 + """ + args = '' + option_list = Command.option_list + ( + Option('--args', '-a', help='positional arguments (json).'), + Option('--kwargs', '-k', help='keyword arguments (json).'), + Option('--eta', help='scheduled time (ISO-8601).'), + Option('--countdown', type='float', + help='eta in seconds from now (float/int).'), + Option('--expires', help='expiry time (ISO-8601/float/int).'), + Option('--serializer', default='json', help='defaults to json.'), + Option('--queue', help='custom queue name.'), + Option('--exchange', help='custom exchange name.'), + Option('--routing-key', help='custom routing key.'), + ) + + def run(self, name, *_, **kw): + # Positional args. + args = kw.get('args') or () + if isinstance(args, string_t): + args = anyjson.loads(args) + + # Keyword args. 
+ kwargs = kw.get('kwargs') or {} + if isinstance(kwargs, string_t): + kwargs = anyjson.loads(kwargs) + + # Expires can be int/float. + expires = kw.get('expires') or None + try: + expires = float(expires) + except (TypeError, ValueError): + # or a string describing an ISO 8601 datetime. + try: + expires = maybe_iso8601(expires) + except (TypeError, ValueError): + raise + + res = self.app.send_task(name, args=args, kwargs=kwargs, + countdown=kw.get('countdown'), + serializer=kw.get('serializer'), + queue=kw.get('queue'), + exchange=kw.get('exchange'), + routing_key=kw.get('routing_key'), + eta=maybe_iso8601(kw.get('eta')), + expires=expires) + self.out(res.id) + + +class purge(Command): + """Erase all messages from all known task queues. + + WARNING: There is no undo operation for this command. + + """ + warn_prelude = ( + '{warning}: This will remove all tasks from {queues}: {names}.\n' + ' There is no undo for this operation!\n\n' + '(to skip this prompt use the -f option)\n' + ) + warn_prompt = 'Are you sure you want to delete all tasks' + fmt_purged = 'Purged {mnum} {messages} from {qnum} known task {queues}.' + fmt_empty = 'No messages purged from {qnum} {queues}' + option_list = Command.option_list + ( + Option('--force', '-f', action='store_true', + help='Do not prompt for verification'), + ) + + def run(self, force=False, **kwargs): + names = list(sorted(self.app.amqp.queues.keys())) + qnum = len(names) + if not force: + self.out(self.warn_prelude.format( + warning=self.colored.red('WARNING'), + queues=text.pluralize(qnum, 'queue'), names=', '.join(names), + )) + if self.ask(self.warn_prompt, ('yes', 'no'), 'no') != 'yes': + return + messages = self.app.control.purge() + fmt = self.fmt_purged if messages else self.fmt_empty + self.out(fmt.format( + mnum=messages, qnum=qnum, + messages=text.pluralize(messages, 'message'), + queues=text.pluralize(qnum, 'queue'))) + + +class result(Command): + """Gives the return value for a given task id. + + Examples:: + + celery result 8f511516-e2f5-4da4-9d2f-0fb83a86e500 + celery result 8f511516-e2f5-4da4-9d2f-0fb83a86e500 -t tasks.add + celery result 8f511516-e2f5-4da4-9d2f-0fb83a86e500 --traceback + + """ + args = '' + option_list = Command.option_list + ( + Option('--task', '-t', help='name of task (if custom backend)'), + Option('--traceback', action='store_true', + help='show traceback instead'), + ) + + def run(self, task_id, *args, **kwargs): + result_cls = self.app.AsyncResult + task = kwargs.get('task') + traceback = kwargs.get('traceback', False) + + if task: + result_cls = self.app.tasks[task].AsyncResult + result = result_cls(task_id) + if traceback: + value = result.traceback + else: + value = result.get() + self.out(self.pretty(value)[1]) + + +class _RemoteControl(Command): + name = None + choices = None + leaf = False + option_list = Command.option_list + ( + Option('--timeout', '-t', type='float', + help='Timeout in seconds (float) waiting for reply'), + Option('--destination', '-d', + help='Comma separated list of destination node names.')) + + def __init__(self, *args, **kwargs): + self.show_body = kwargs.pop('show_body', True) + self.show_reply = kwargs.pop('show_reply', True) + super(_RemoteControl, self).__init__(*args, **kwargs) + + @classmethod + def get_command_info(self, command, + indent=0, prefix='', color=None, help=False): + if help: + help = '|' + text.indent(self.choices[command][1], indent + 4) + else: + help = None + try: + # see if it uses args. 
+ meth = getattr(self, command) + return text.join([ + '|' + text.indent('{0}{1} {2}'.format( + prefix, color(command), meth.__doc__), indent), + help, + ]) + + except AttributeError: + return text.join([ + '|' + text.indent(prefix + str(color(command)), indent), help, + ]) + + @classmethod + def list_commands(self, indent=0, prefix='', color=None, help=False): + color = color if color else lambda x: x + prefix = prefix + ' ' if prefix else '' + return '\n'.join(self.get_command_info(c, indent, prefix, color, help) + for c in sorted(self.choices)) + + @property + def epilog(self): + return '\n'.join([ + '[Commands]', + self.list_commands(indent=4, help=True) + ]) + + def usage(self, command): + return '%prog {0} [options] {1} [arg1 .. argN]'.format( + command, self.args) + + def call(self, *args, **kwargs): + raise NotImplementedError('call') + + def run(self, *args, **kwargs): + if not args: + raise self.UsageError( + 'Missing {0.name} method. See --help'.format(self)) + return self.do_call_method(args, **kwargs) + + def do_call_method(self, args, **kwargs): + method = args[0] + if method == 'help': + raise self.Error("Did you mean '{0.name} --help'?".format(self)) + if method not in self.choices: + raise self.UsageError( + 'Unknown {0.name} method {1}'.format(self, method)) + + if self.app.connection().transport.driver_type == 'sql': + raise self.Error('Broadcast not supported by SQL broker transport') + + destination = kwargs.get('destination') + timeout = kwargs.get('timeout') or self.choices[method][0] + if destination and isinstance(destination, string_t): + destination = [dest.strip() for dest in destination.split(',')] + + handler = getattr(self, method, self.call) + + replies = handler(method, *args[1:], timeout=timeout, + destination=destination, + callback=self.say_remote_command_reply) + if not replies: + raise self.Error('No nodes replied within time constraint.', + status=EX_UNAVAILABLE) + return replies + + +class inspect(_RemoteControl): + """Inspect the worker at runtime. + + Availability: RabbitMQ (amqp), Redis, and MongoDB transports. + + Examples:: + + celery inspect active --timeout=5 + celery inspect scheduled -d worker1@example.com + celery inspect revoked -d w1@e.com,w2@e.com + + """ + name = 'inspect' + choices = { + 'active': (1.0, 'dump active tasks (being processed)'), + 'active_queues': (1.0, 'dump queues being consumed from'), + 'scheduled': (1.0, 'dump scheduled tasks (eta/countdown/retry)'), + 'reserved': (1.0, 'dump reserved tasks (waiting to be processed)'), + 'stats': (1.0, 'dump worker statistics'), + 'revoked': (1.0, 'dump of revoked task ids'), + 'registered': (1.0, 'dump of registered tasks'), + 'ping': (0.2, 'ping worker(s)'), + 'clock': (1.0, 'get value of logical clock'), + 'conf': (1.0, 'dump worker configuration'), + 'report': (1.0, 'get bugreport info'), + 'memsample': (1.0, 'sample memory (requires psutil)'), + 'memdump': (1.0, 'dump memory samples (requires psutil)'), + 'objgraph': (60.0, 'create object graph (requires objgraph)'), + } + + def call(self, method, *args, **options): + i = self.app.control.inspect(**options) + return getattr(i, method)(*args) + + def objgraph(self, type_='Request', *args, **kwargs): + return self.call('objgraph', type_, **kwargs) + + def conf(self, with_defaults=False, *args, **kwargs): + return self.call('conf', with_defaults, **kwargs) + + +class control(_RemoteControl): + """Workers remote control. + + Availability: RabbitMQ (amqp), Redis, and MongoDB transports. 
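    These subcommands are thin wrappers over the broadcast API, so the same operations are
    available from Python; a rough sketch, assuming a reachable broker and an application
    instance ``app``::

        i = app.control.inspect(timeout=1.0)
        print(i.ping())                                # ~ celery inspect ping
        app.control.rate_limit('tasks.add', '10/m')    # ~ celery control rate_limit tasks.add 10/m
        app.control.add_consumer('hipri', reply=True)  # ~ celery control add_consumer hipri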
+ + Examples:: + + celery control enable_events --timeout=5 + celery control -d worker1@example.com enable_events + celery control -d w1.e.com,w2.e.com enable_events + + celery control -d w1.e.com add_consumer queue_name + celery control -d w1.e.com cancel_consumer queue_name + + celery control -d w1.e.com add_consumer queue exchange direct rkey + + """ + name = 'control' + choices = { + 'enable_events': (1.0, 'tell worker(s) to enable events'), + 'disable_events': (1.0, 'tell worker(s) to disable events'), + 'add_consumer': (1.0, 'tell worker(s) to start consuming a queue'), + 'cancel_consumer': (1.0, 'tell worker(s) to stop consuming a queue'), + 'rate_limit': ( + 1.0, 'tell worker(s) to modify the rate limit for a task type'), + 'time_limit': ( + 1.0, 'tell worker(s) to modify the time limit for a task type.'), + 'autoscale': (1.0, 'change autoscale settings'), + 'pool_grow': (1.0, 'start more pool processes'), + 'pool_shrink': (1.0, 'use less pool processes'), + } + + def call(self, method, *args, **options): + return getattr(self.app.control, method)(*args, reply=True, **options) + + def pool_grow(self, method, n=1, **kwargs): + """[N=1]""" + return self.call(method, int(n), **kwargs) + + def pool_shrink(self, method, n=1, **kwargs): + """[N=1]""" + return self.call(method, int(n), **kwargs) + + def autoscale(self, method, max=None, min=None, **kwargs): + """[max] [min]""" + return self.call(method, int(max), int(min), **kwargs) + + def rate_limit(self, method, task_name, rate_limit, **kwargs): + """ (e.g. 5/s | 5/m | 5/h)>""" + return self.call(method, task_name, rate_limit, **kwargs) + + def time_limit(self, method, task_name, soft, hard=None, **kwargs): + """ [hard_secs]""" + return self.call(method, task_name, + float(soft), float(hard), **kwargs) + + def add_consumer(self, method, queue, exchange=None, + exchange_type='direct', routing_key=None, **kwargs): + """ [exchange [type [routing_key]]]""" + return self.call(method, queue, exchange, + exchange_type, routing_key, **kwargs) + + def cancel_consumer(self, method, queue, **kwargs): + """""" + return self.call(method, queue, **kwargs) + + +class status(Command): + """Show list of workers that are online.""" + option_list = inspect.option_list + + def run(self, *args, **kwargs): + I = inspect( + app=self.app, + no_color=kwargs.get('no_color', False), + stdout=self.stdout, stderr=self.stderr, + show_reply=False, show_body=False, quiet=True, + ) + replies = I.run('ping', **kwargs) + if not replies: + raise self.Error('No nodes replied within time constraint', + status=EX_UNAVAILABLE) + nodecount = len(replies) + if not kwargs.get('quiet', False): + self.out('\n{0} {1} online.'.format( + nodecount, text.pluralize(nodecount, 'node'))) + + +class migrate(Command): + """Migrate tasks from one broker to another. + + Examples:: + + celery migrate redis://localhost amqp://guest@localhost// + celery migrate django:// redis://localhost + + NOTE: This command is experimental, make sure you have + a backup of the tasks before you continue. 
+ """ + args = ' ' + option_list = Command.option_list + ( + Option('--limit', '-n', type='int', + help='Number of tasks to consume (int)'), + Option('--timeout', '-t', type='float', default=1.0, + help='Timeout in seconds (float) waiting for tasks'), + Option('--ack-messages', '-a', action='store_true', + help='Ack messages from source broker.'), + Option('--tasks', '-T', + help='List of task names to filter on.'), + Option('--queues', '-Q', + help='List of queues to migrate.'), + Option('--forever', '-F', action='store_true', + help='Continually migrate tasks until killed.'), + ) + progress_fmt = MIGRATE_PROGRESS_FMT + + def on_migrate_task(self, state, body, message): + self.out(self.progress_fmt.format(state=state, body=body)) + + def run(self, source, destination, **kwargs): + from kombu import Connection + from celery.contrib.migrate import migrate_tasks + + migrate_tasks(Connection(source), + Connection(destination), + callback=self.on_migrate_task, + **kwargs) + + +class shell(Command): # pragma: no cover + """Start shell session with convenient access to celery symbols. + + The following symbols will be added to the main globals: + + - celery: the current application. + - chord, group, chain, chunks, + xmap, xstarmap subtask, Task + - all registered tasks. + + """ + option_list = Command.option_list + ( + Option('--ipython', '-I', + action='store_true', dest='force_ipython', + help='force iPython.'), + Option('--bpython', '-B', + action='store_true', dest='force_bpython', + help='force bpython.'), + Option('--python', '-P', + action='store_true', dest='force_python', + help='force default Python shell.'), + Option('--without-tasks', '-T', action='store_true', + help="don't add tasks to locals."), + Option('--eventlet', action='store_true', + help='use eventlet.'), + Option('--gevent', action='store_true', help='use gevent.'), + ) + + def run(self, force_ipython=False, force_bpython=False, + force_python=False, without_tasks=False, eventlet=False, + gevent=False, **kwargs): + sys.path.insert(0, os.getcwd()) + if eventlet: + import_module('celery.concurrency.eventlet') + if gevent: + import_module('celery.concurrency.gevent') + import celery + import celery.task.base + self.app.loader.import_default_modules() + self.locals = {'app': self.app, + 'celery': self.app, + 'Task': celery.Task, + 'chord': celery.chord, + 'group': celery.group, + 'chain': celery.chain, + 'chunks': celery.chunks, + 'xmap': celery.xmap, + 'xstarmap': celery.xstarmap, + 'subtask': celery.subtask, + 'signature': celery.signature} + + if not without_tasks: + self.locals.update(dict( + (task.__name__, task) for task in values(self.app.tasks) + if not task.name.startswith('celery.')), + ) + + if force_python: + return self.invoke_fallback_shell() + elif force_bpython: + return self.invoke_bpython_shell() + elif force_ipython: + return self.invoke_ipython_shell() + return self.invoke_default_shell() + + def invoke_default_shell(self): + try: + import IPython # noqa + except ImportError: + try: + import bpython # noqa + except ImportError: + return self.invoke_fallback_shell() + else: + return self.invoke_bpython_shell() + else: + return self.invoke_ipython_shell() + + def invoke_fallback_shell(self): + import code + try: + import readline + except ImportError: + pass + else: + import rlcompleter + readline.set_completer( + rlcompleter.Completer(self.locals).complete) + readline.parse_and_bind('tab:complete') + code.interact(local=self.locals) + + def invoke_ipython_shell(self): + for ip in (self._ipython, 
self._ipython_pre_10, + self._ipython_terminal, self._ipython_010, + self._no_ipython): + try: + return ip() + except ImportError: + pass + + def _ipython(self): + from IPython import start_ipython + start_ipython(argv=[], user_ns=self.locals) + + def _ipython_pre_10(self): # pragma: no cover + from IPython.frontend.terminal.ipapp import TerminalIPythonApp + app = TerminalIPythonApp.instance() + app.initialize(argv=[]) + app.shell.user_ns.update(self.locals) + app.start() + + def _ipython_terminal(self): # pragma: no cover + from IPython.terminal import embed + embed.TerminalInteractiveShell(user_ns=self.locals).mainloop() + + def _ipython_010(self): # pragma: no cover + from IPython.Shell import IPShell + IPShell(argv=[], user_ns=self.locals).mainloop() + + def _no_ipython(self): # pragma: no cover + raise ImportError("no suitable ipython found") + + def invoke_bpython_shell(self): + import bpython + bpython.embed(self.locals) + + +class help(Command): + """Show help screen and exit.""" + + def usage(self, command): + return '%prog [options] {0.args}'.format(self) + + def run(self, *args, **kwargs): + self.parser.print_help() + self.out(HELP.format( + prog_name=self.prog_name, + commands=CeleryCommand.list_commands(colored=self.colored), + )) + + return EX_USAGE + + +class report(Command): + """Shows information useful to include in bugreports.""" + + def run(self, *args, **kwargs): + self.out(self.app.bugreport()) + return EX_OK + + +class CeleryCommand(Command): + namespace = 'celery' + ext_fmt = '{self.namespace}.commands' + commands = { + 'amqp': amqp, + 'beat': beat, + 'call': call, + 'control': control, + 'events': events, + 'graph': graph, + 'help': help, + 'inspect': inspect, + 'list': list_, + 'migrate': migrate, + 'multi': multi, + 'purge': purge, + 'report': report, + 'result': result, + 'shell': shell, + 'status': status, + 'worker': worker, + + } + enable_config_from_cmdline = True + prog_name = 'celery' + + @classmethod + def register_command(cls, fun, name=None): + cls.commands[name or fun.__name__] = fun + return fun + + def execute(self, command, argv=None): + try: + cls = self.commands[command] + except KeyError: + cls, argv = self.commands['help'], ['help'] + cls = self.commands.get(command) or self.commands['help'] + try: + return cls( + app=self.app, on_error=self.on_error, + no_color=self.no_color, quiet=self.quiet, + on_usage_error=partial(self.on_usage_error, command=command), + ).run_from_argv(self.prog_name, argv[1:], command=argv[0]) + except self.UsageError as exc: + self.on_usage_error(exc) + return exc.status + except self.Error as exc: + self.on_error(exc) + return exc.status + + def on_usage_error(self, exc, command=None): + if command: + helps = '{self.prog_name} {command} --help' + else: + helps = '{self.prog_name} --help' + self.error(self.colored.magenta('Error: {0}'.format(exc))) + self.error("""Please try '{0}'""".format(helps.format( + self=self, command=command, + ))) + + def _relocate_args_from_start(self, argv, index=0): + if argv: + rest = [] + while index < len(argv): + value = argv[index] + if value.startswith('--'): + rest.append(value) + elif value.startswith('-'): + # we eat the next argument even though we don't know + # if this option takes an argument or not. + # instead we will assume what is the command name in the + # return statements below. 
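                # A concrete illustration of the relocation (hypothetical invocation):
                #     ['-A', 'proj', 'worker', '-l', 'info']
                # is rewritten to
                #     ['worker', '-l', 'info', '-A', 'proj']
                # so that argv[0] is the subcommand name and the eaten options are
                # appended at the end, where the subcommand's own parser picks them up.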
+ try: + nxt = argv[index + 1] + if nxt.startswith('-'): + # is another option + rest.append(value) + else: + # is (maybe) a value for this option + rest.extend([value, nxt]) + index += 1 + except IndexError: + rest.append(value) + break + else: + break + index += 1 + if argv[index:]: + # if there are more arguments left then divide and swap + # we assume the first argument in argv[i:] is the command + # name. + return argv[index:] + rest + # if there are no more arguments then the last arg in rest' + # must be the command. + [rest.pop()] + rest + return [] + + def prepare_prog_name(self, name): + if name == '__main__.py': + return sys.modules['__main__'].__file__ + return name + + def handle_argv(self, prog_name, argv): + self.prog_name = self.prepare_prog_name(prog_name) + argv = self._relocate_args_from_start(argv) + _, argv = self.prepare_args(None, argv) + try: + command = argv[0] + except IndexError: + command, argv = 'help', ['help'] + return self.execute(command, argv) + + def execute_from_commandline(self, argv=None): + argv = sys.argv if argv is None else argv + if 'multi' in argv[1:3]: # Issue 1008 + self.respects_app_option = False + try: + sys.exit(determine_exit_status( + super(CeleryCommand, self).execute_from_commandline(argv))) + except KeyboardInterrupt: + sys.exit(EX_FAILURE) + + @classmethod + def get_command_info(self, command, indent=0, color=None, colored=None): + colored = term.colored() if colored is None else colored + colored = colored.names[color] if color else lambda x: x + obj = self.commands[command] + cmd = 'celery {0}'.format(colored(command)) + if obj.leaf: + return '|' + text.indent(cmd, indent) + return text.join([ + ' ', + '|' + text.indent('{0} --help'.format(cmd), indent), + obj.list_commands(indent, 'celery {0}'.format(command), colored), + ]) + + @classmethod + def list_commands(self, indent=0, colored=None): + colored = term.colored() if colored is None else colored + white = colored.white + ret = [] + for cls, commands, color in command_classes: + ret.extend([ + text.indent('+ {0}: '.format(white(cls)), indent), + '\n'.join( + self.get_command_info(command, indent + 4, color, colored) + for command in commands), + '' + ]) + return '\n'.join(ret).strip() + + def with_pool_option(self, argv): + if len(argv) > 1 and 'worker' in argv[0:3]: + # this command supports custom pools + # that may have to be loaded as early as possible. 
+ return (['-P'], ['--pool']) + + def on_concurrency_setup(self): + self.load_extension_commands() + + def load_extension_commands(self): + names = Extensions(self.ext_fmt.format(self=self), + self.register_command).load() + if names: + command_classes.append(('Extensions', names, 'magenta')) + + +def command(*args, **kwargs): + """Deprecated: Use classmethod :meth:`CeleryCommand.register_command` + instead.""" + _register = CeleryCommand.register_command + return _register(args[0]) if args else _register + + +if __name__ == '__main__': # pragma: no cover + main() diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/celeryd_detach.py b/thesisenv/lib/python3.6/site-packages/celery/bin/celeryd_detach.py new file mode 100644 index 0000000..4d37d5f --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/bin/celeryd_detach.py @@ -0,0 +1,181 @@ +# -*- coding: utf-8 -*- +""" + celery.bin.celeryd_detach + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + Program used to daemonize the worker + + Using :func:`os.execv` because forking and multiprocessing + leads to weird issues (it was a long time ago now, but it + could have something to do with the threading mutex bug) + +""" +from __future__ import absolute_import + +import celery +import os +import sys + +from optparse import OptionParser, BadOptionError + +from celery.platforms import EX_FAILURE, detached +from celery.utils import default_nodename, node_format +from celery.utils.log import get_logger + +from celery.bin.base import daemon_options, Option + +__all__ = ['detached_celeryd', 'detach'] + +logger = get_logger(__name__) + +C_FAKEFORK = os.environ.get('C_FAKEFORK') + +OPTION_LIST = daemon_options(default_pidfile='celeryd.pid') + ( + Option('--workdir', default=None, dest='working_directory'), + Option('-n', '--hostname'), + Option('--fake', + default=False, action='store_true', dest='fake', + help="Don't fork (for debugging purposes)"), +) + + +def detach(path, argv, logfile=None, pidfile=None, uid=None, + gid=None, umask=None, working_directory=None, fake=False, app=None, + executable=None, hostname=None): + hostname = default_nodename(hostname) + logfile = node_format(logfile, hostname) + pidfile = node_format(pidfile, hostname) + fake = 1 if C_FAKEFORK else fake + with detached(logfile, pidfile, uid, gid, umask, working_directory, fake, + after_forkers=False): + try: + if executable is not None: + path = executable + os.execv(path, [path] + argv) + except Exception: + if app is None: + from celery import current_app + app = current_app + app.log.setup_logging_subsystem( + 'ERROR', logfile, hostname=hostname) + logger.critical("Can't exec %r", ' '.join([path] + argv), + exc_info=True) + return EX_FAILURE + + +class PartialOptionParser(OptionParser): + + def __init__(self, *args, **kwargs): + self.leftovers = [] + OptionParser.__init__(self, *args, **kwargs) + + def _process_long_opt(self, rargs, values): + arg = rargs.pop(0) + + if '=' in arg: + opt, next_arg = arg.split('=', 1) + rargs.insert(0, next_arg) + had_explicit_value = True + else: + opt = arg + had_explicit_value = False + + try: + opt = self._match_long_opt(opt) + option = self._long_opt.get(opt) + except BadOptionError: + option = None + + if option: + if option.takes_value(): + nargs = option.nargs + if len(rargs) < nargs: + if nargs == 1: + self.error('{0} requires an argument'.format(opt)) + else: + self.error('{0} requires {1} arguments'.format( + opt, nargs)) + elif nargs == 1: + value = rargs.pop(0) + else: + value = tuple(rargs[0:nargs]) + del rargs[0:nargs] + + elif 
had_explicit_value: + self.error('{0} option does not take a value'.format(opt)) + else: + value = None + option.process(opt, value, values, self) + else: + self.leftovers.append(arg) + + def _process_short_opts(self, rargs, values): + arg = rargs[0] + try: + OptionParser._process_short_opts(self, rargs, values) + except BadOptionError: + self.leftovers.append(arg) + if rargs and not rargs[0][0] == '-': + self.leftovers.append(rargs.pop(0)) + + +class detached_celeryd(object): + option_list = OPTION_LIST + usage = '%prog [options] [celeryd options]' + version = celery.VERSION_BANNER + description = ('Detaches Celery worker nodes. See `celery worker --help` ' + 'for the list of supported worker arguments.') + command = sys.executable + execv_path = sys.executable + if sys.version_info < (2, 7): # does not support pkg/__main__.py + execv_argv = ['-m', 'celery.__main__', 'worker'] + else: + execv_argv = ['-m', 'celery', 'worker'] + + def __init__(self, app=None): + self.app = app + + def Parser(self, prog_name): + return PartialOptionParser(prog=prog_name, + option_list=self.option_list, + usage=self.usage, + description=self.description, + version=self.version) + + def parse_options(self, prog_name, argv): + parser = self.Parser(prog_name) + options, values = parser.parse_args(argv) + if options.logfile: + parser.leftovers.append('--logfile={0}'.format(options.logfile)) + if options.pidfile: + parser.leftovers.append('--pidfile={0}'.format(options.pidfile)) + if options.hostname: + parser.leftovers.append('--hostname={0}'.format(options.hostname)) + return options, values, parser.leftovers + + def execute_from_commandline(self, argv=None): + if argv is None: + argv = sys.argv + config = [] + seen_cargs = 0 + for arg in argv: + if seen_cargs: + config.append(arg) + else: + if arg == '--': + seen_cargs = 1 + config.append(arg) + prog_name = os.path.basename(argv[0]) + options, values, leftovers = self.parse_options(prog_name, argv[1:]) + sys.exit(detach( + app=self.app, path=self.execv_path, + argv=self.execv_argv + leftovers + config, + **vars(options) + )) + + +def main(app=None): + detached_celeryd(app).execute_from_commandline() + +if __name__ == '__main__': # pragma: no cover + main() diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/events.py b/thesisenv/lib/python3.6/site-packages/celery/bin/events.py new file mode 100644 index 0000000..8cc61b6 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/bin/events.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +""" + +The :program:`celery events` command. + +.. program:: celery events + +.. seealso:: + + See :ref:`preload-options` and :ref:`daemon-options`. + +.. cmdoption:: -d, --dump + + Dump events to stdout. + +.. cmdoption:: -c, --camera + + Take snapshots of events using this camera. + +.. cmdoption:: --detach + + Camera: Detach and run in the background as a daemon. + +.. cmdoption:: -F, --freq, --frequency + + Camera: Shutter frequency. Default is every 1.0 seconds. + +.. cmdoption:: -r, --maxrate + + Camera: Optional shutter rate limit (e.g. 10/m). + +.. cmdoption:: -l, --loglevel + + Logging level, choose between `DEBUG`, `INFO`, `WARNING`, + `ERROR`, `CRITICAL`, or `FATAL`. Default is INFO. + +""" +from __future__ import absolute_import, unicode_literals + +import sys + +from functools import partial + +from celery.platforms import detached, set_process_title, strargv +from celery.bin.base import Command, Option, daemon_options + +__all__ = ['events'] + + +class events(Command): + """Event-stream utilities. 
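    The monitor, dumper and camera modes all consume the same event stream, which can also be
    read directly from Python; a rough sketch, assuming a worker started with ``-E`` and a
    reachable broker::

        def on_event(event):
            print(event['type'], event.get('uuid'))

        with app.connection() as conn:
            recv = app.events.Receiver(conn, handlers={'*': on_event})
            recv.capture(limit=10, timeout=5.0, wakeup=True)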
+ + Commands:: + + celery events --app=proj + start graphical monitor (requires curses) + celery events -d --app=proj + dump events to screen. + celery events -b amqp:// + celery events -c [options] + run snapshot camera. + + Examples:: + + celery events + celery events -d + celery events -c mod.attr -F 1.0 --detach --maxrate=100/m -l info + """ + doc = __doc__ + supports_args = False + + def run(self, dump=False, camera=None, frequency=1.0, maxrate=None, + loglevel='INFO', logfile=None, prog_name='celery events', + pidfile=None, uid=None, gid=None, umask=None, + working_directory=None, detach=False, **kwargs): + self.prog_name = prog_name + + if dump: + return self.run_evdump() + if camera: + return self.run_evcam(camera, freq=frequency, maxrate=maxrate, + loglevel=loglevel, logfile=logfile, + pidfile=pidfile, uid=uid, gid=gid, + umask=umask, + working_directory=working_directory, + detach=detach) + return self.run_evtop() + + def run_evdump(self): + from celery.events.dumper import evdump + self.set_process_status('dump') + return evdump(app=self.app) + + def run_evtop(self): + from celery.events.cursesmon import evtop + self.set_process_status('top') + return evtop(app=self.app) + + def run_evcam(self, camera, logfile=None, pidfile=None, uid=None, + gid=None, umask=None, working_directory=None, + detach=False, **kwargs): + from celery.events.snapshot import evcam + workdir = working_directory + self.set_process_status('cam') + kwargs['app'] = self.app + cam = partial(evcam, camera, + logfile=logfile, pidfile=pidfile, **kwargs) + + if detach: + with detached(logfile, pidfile, uid, gid, umask, workdir): + return cam() + else: + return cam() + + def set_process_status(self, prog, info=''): + prog = '{0}:{1}'.format(self.prog_name, prog) + info = '{0} {1}'.format(info, strargv(sys.argv)) + return set_process_title(prog, info=info) + + def get_options(self): + return ( + (Option('-d', '--dump', action='store_true'), + Option('-c', '--camera'), + Option('--detach', action='store_true'), + Option('-F', '--frequency', '--freq', + type='float', default=1.0), + Option('-r', '--maxrate'), + Option('-l', '--loglevel', default='INFO')) + + daemon_options(default_pidfile='celeryev.pid') + + tuple(self.app.user_options['events']) + ) + + +def main(): + ev = events() + ev.execute_from_commandline() + +if __name__ == '__main__': # pragma: no cover + main() diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/graph.py b/thesisenv/lib/python3.6/site-packages/celery/bin/graph.py new file mode 100644 index 0000000..5d58476 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/bin/graph.py @@ -0,0 +1,191 @@ +# -*- coding: utf-8 -*- +""" + +The :program:`celery graph` command. + +.. program:: celery graph + +""" +from __future__ import absolute_import, unicode_literals + +from operator import itemgetter + +from celery.datastructures import DependencyGraph, GraphFormatter +from celery.five import items + +from .base import Command + +__all__ = ['graph'] + + +class graph(Command): + args = """ [arguments] + ..... bootsteps [worker] [consumer] + ..... 
workers [enumerate] + """ + + def run(self, what=None, *args, **kwargs): + map = {'bootsteps': self.bootsteps, 'workers': self.workers} + if not what: + raise self.UsageError('missing type') + elif what not in map: + raise self.Error('no graph {0} in {1}'.format(what, '|'.join(map))) + return map[what](*args, **kwargs) + + def bootsteps(self, *args, **kwargs): + worker = self.app.WorkController() + include = set(arg.lower() for arg in args or ['worker', 'consumer']) + if 'worker' in include: + graph = worker.blueprint.graph + if 'consumer' in include: + worker.blueprint.connect_with(worker.consumer.blueprint) + else: + graph = worker.consumer.blueprint.graph + graph.to_dot(self.stdout) + + def workers(self, *args, **kwargs): + + def simplearg(arg): + return maybe_list(itemgetter(0, 2)(arg.partition(':'))) + + def maybe_list(l, sep=','): + return (l[0], l[1].split(sep) if sep in l[1] else l[1]) + + args = dict(simplearg(arg) for arg in args) + generic = 'generic' in args + + def generic_label(node): + return '{0} ({1}://)'.format(type(node).__name__, + node._label.split('://')[0]) + + class Node(object): + force_label = None + scheme = {} + + def __init__(self, label, pos=None): + self._label = label + self.pos = pos + + def label(self): + return self._label + + def __str__(self): + return self.label() + + class Thread(Node): + scheme = {'fillcolor': 'lightcyan4', 'fontcolor': 'yellow', + 'shape': 'oval', 'fontsize': 10, 'width': 0.3, + 'color': 'black'} + + def __init__(self, label, **kwargs): + self._label = 'thr-{0}'.format(next(tids)) + self.real_label = label + self.pos = 0 + + class Formatter(GraphFormatter): + + def label(self, obj): + return obj and obj.label() + + def node(self, obj): + scheme = dict(obj.scheme) if obj.pos else obj.scheme + if isinstance(obj, Thread): + scheme['label'] = obj.real_label + return self.draw_node( + obj, dict(self.node_scheme, **scheme), + ) + + def terminal_node(self, obj): + return self.draw_node( + obj, dict(self.term_scheme, **obj.scheme), + ) + + def edge(self, a, b, **attrs): + if isinstance(a, Thread): + attrs.update(arrowhead='none', arrowtail='tee') + return self.draw_edge(a, b, self.edge_scheme, attrs) + + def subscript(n): + S = {'0': '₀', '1': '₁', '2': '₂', '3': '₃', '4': '₄', + '5': '₅', '6': '₆', '7': '₇', '8': '₈', '9': '₉'} + return ''.join([S[i] for i in str(n)]) + + class Worker(Node): + pass + + class Backend(Node): + scheme = {'shape': 'folder', 'width': 2, + 'height': 1, 'color': 'black', + 'fillcolor': 'peachpuff3', 'color': 'peachpuff4'} + + def label(self): + return generic_label(self) if generic else self._label + + class Broker(Node): + scheme = {'shape': 'circle', 'fillcolor': 'cadetblue3', + 'color': 'cadetblue4', 'height': 1} + + def label(self): + return generic_label(self) if generic else self._label + + from itertools import count + tids = count(1) + Wmax = int(args.get('wmax', 4) or 0) + Tmax = int(args.get('tmax', 3) or 0) + + def maybe_abbr(l, name, max=Wmax): + size = len(l) + abbr = max and size > max + if 'enumerate' in args: + l = ['{0}{1}'.format(name, subscript(i + 1)) + for i, obj in enumerate(l)] + if abbr: + l = l[0:max - 1] + [l[size - 1]] + l[max - 2] = '{0}⎨…{1}⎬'.format( + name[0], subscript(size - (max - 1))) + return l + + try: + workers = args['nodes'] + threads = args.get('threads') or [] + except KeyError: + replies = self.app.control.inspect().stats() + workers, threads = [], [] + for worker, reply in items(replies): + workers.append(worker) + threads.append(reply['pool']['max-concurrency']) + + 
wlen = len(workers) + backend = args.get('backend', self.app.conf.CELERY_RESULT_BACKEND) + threads_for = {} + workers = maybe_abbr(workers, 'Worker') + if Wmax and wlen > Wmax: + threads = threads[0:3] + [threads[-1]] + for i, threads in enumerate(threads): + threads_for[workers[i]] = maybe_abbr( + list(range(int(threads))), 'P', Tmax, + ) + + broker = Broker(args.get('broker', self.app.connection().as_uri())) + backend = Backend(backend) if backend else None + graph = DependencyGraph(formatter=Formatter()) + graph.add_arc(broker) + if backend: + graph.add_arc(backend) + curworker = [0] + for i, worker in enumerate(workers): + worker = Worker(worker, pos=i) + graph.add_arc(worker) + graph.add_edge(worker, broker) + if backend: + graph.add_edge(worker, backend) + threads = threads_for.get(worker._label) + if threads: + for thread in threads: + thread = Thread(thread) + graph.add_arc(thread) + graph.add_edge(thread, worker) + + curworker[0] += 1 + + graph.to_dot(self.stdout) diff --git a/thesisenv/lib/python3.6/site-packages/celery/bin/multi.py b/thesisenv/lib/python3.6/site-packages/celery/bin/multi.py new file mode 100644 index 0000000..f30aa9e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/bin/multi.py @@ -0,0 +1,646 @@ +# -*- coding: utf-8 -*- +""" + +.. program:: celery multi + +Examples +======== + +.. code-block:: bash + + # Single worker with explicit name and events enabled. + $ celery multi start Leslie -E + + # Pidfiles and logfiles are stored in the current directory + # by default. Use --pidfile and --logfile argument to change + # this. The abbreviation %N will be expanded to the current + # node name. + $ celery multi start Leslie -E --pidfile=/var/run/celery/%N.pid + --logfile=/var/log/celery/%N.log + + + # You need to add the same arguments when you restart, + # as these are not persisted anywhere. + $ celery multi restart Leslie -E --pidfile=/var/run/celery/%N.pid + --logfile=/var/run/celery/%N.log + + # To stop the node, you need to specify the same pidfile. + $ celery multi stop Leslie --pidfile=/var/run/celery/%N.pid + + # 3 workers, with 3 processes each + $ celery multi start 3 -c 3 + celery worker -n celery1@myhost -c 3 + celery worker -n celery2@myhost -c 3 + celery worker -n celery3@myhost -c 3 + + # start 3 named workers + $ celery multi start image video data -c 3 + celery worker -n image@myhost -c 3 + celery worker -n video@myhost -c 3 + celery worker -n data@myhost -c 3 + + # specify custom hostname + $ celery multi start 2 --hostname=worker.example.com -c 3 + celery worker -n celery1@worker.example.com -c 3 + celery worker -n celery2@worker.example.com -c 3 + + # specify fully qualified nodenames + $ celery multi start foo@worker.example.com bar@worker.example.com -c 3 + + # Advanced example starting 10 workers in the background: + # * Three of the workers processes the images and video queue + # * Two of the workers processes the data queue with loglevel DEBUG + # * the rest processes the default' queue. + $ celery multi start 10 -l INFO -Q:1-3 images,video -Q:4,5 data + -Q default -L:4,5 DEBUG + + # You can show the commands necessary to start the workers with + # the 'show' command: + $ celery multi show 10 -l INFO -Q:1-3 images,video -Q:4,5 data + -Q default -L:4,5 DEBUG + + # Additional options are added to each celery worker' comamnd, + # but you can also modify the options for ranges of, or specific workers + + # 3 workers: Two with 3 processes, and one with 10 processes. 
+ $ celery multi start 3 -c 3 -c:1 10 + celery worker -n celery1@myhost -c 10 + celery worker -n celery2@myhost -c 3 + celery worker -n celery3@myhost -c 3 + + # can also specify options for named workers + $ celery multi start image video data -c 3 -c:image 10 + celery worker -n image@myhost -c 10 + celery worker -n video@myhost -c 3 + celery worker -n data@myhost -c 3 + + # ranges and lists of workers in options is also allowed: + # (-c:1-3 can also be written as -c:1,2,3) + $ celery multi start 5 -c 3 -c:1-3 10 + celery worker -n celery1@myhost -c 10 + celery worker -n celery2@myhost -c 10 + celery worker -n celery3@myhost -c 10 + celery worker -n celery4@myhost -c 3 + celery worker -n celery5@myhost -c 3 + + # lists also works with named workers + $ celery multi start foo bar baz xuzzy -c 3 -c:foo,bar,baz 10 + celery worker -n foo@myhost -c 10 + celery worker -n bar@myhost -c 10 + celery worker -n baz@myhost -c 10 + celery worker -n xuzzy@myhost -c 3 + +""" +from __future__ import absolute_import, print_function, unicode_literals + +import errno +import os +import shlex +import signal +import socket +import sys + +from collections import defaultdict, namedtuple +from subprocess import Popen +from time import sleep + +from kombu.utils import cached_property +from kombu.utils.compat import OrderedDict +from kombu.utils.encoding import from_utf8 + +from celery import VERSION_BANNER +from celery.five import items +from celery.platforms import Pidfile, IS_WINDOWS +from celery.utils import term, nodesplit +from celery.utils.text import pluralize + +__all__ = ['MultiTool'] + +SIGNAMES = set(sig for sig in dir(signal) + if sig.startswith('SIG') and '_' not in sig) +SIGMAP = dict((getattr(signal, name), name) for name in SIGNAMES) + +USAGE = """\ +usage: {prog_name} start [worker options] + {prog_name} stop [-SIG (default: -TERM)] + {prog_name} stopwait [-SIG (default: -TERM)] + {prog_name} restart [-SIG] [worker options] + {prog_name} kill + + {prog_name} show [worker options] + {prog_name} get hostname [-qv] [worker options] + {prog_name} names + {prog_name} expand template + {prog_name} help + +additional options (must appear after command name): + + * --nosplash: Don't display program info. + * --quiet: Don't show as much output. + * --verbose: Show more output. + * --no-color: Don't display colors. +""" + +multi_args_t = namedtuple( + 'multi_args_t', ('name', 'argv', 'expander', 'namespace'), +) + + +def main(): + sys.exit(MultiTool().execute_from_commandline(sys.argv)) + + +CELERY_EXE = 'celery' +if sys.version_info < (2, 7): + # pkg.__main__ first supported in Py2.7 + CELERY_EXE = 'celery.__main__' + + +def celery_exe(*args): + return ' '.join((CELERY_EXE, ) + args) + + +class MultiTool(object): + retcode = 0 # Final exit code. 
+ + def __init__(self, env=None, fh=None, quiet=False, verbose=False, + no_color=False, nosplash=False, stdout=None, stderr=None): + """fh is an old alias to stdout.""" + self.stdout = self.fh = stdout or fh or sys.stdout + self.stderr = stderr or sys.stderr + self.env = env + self.nosplash = nosplash + self.quiet = quiet + self.verbose = verbose + self.no_color = no_color + self.prog_name = 'celery multi' + self.commands = {'start': self.start, + 'show': self.show, + 'stop': self.stop, + 'stopwait': self.stopwait, + 'stop_verify': self.stopwait, # compat alias + 'restart': self.restart, + 'kill': self.kill, + 'names': self.names, + 'expand': self.expand, + 'get': self.get, + 'help': self.help} + + def execute_from_commandline(self, argv, cmd='celery worker'): + argv = list(argv) # don't modify callers argv. + + # Reserve the --nosplash|--quiet|-q/--verbose options. + if '--nosplash' in argv: + self.nosplash = argv.pop(argv.index('--nosplash')) + if '--quiet' in argv: + self.quiet = argv.pop(argv.index('--quiet')) + if '-q' in argv: + self.quiet = argv.pop(argv.index('-q')) + if '--verbose' in argv: + self.verbose = argv.pop(argv.index('--verbose')) + if '--no-color' in argv: + self.no_color = argv.pop(argv.index('--no-color')) + + self.prog_name = os.path.basename(argv.pop(0)) + if not argv or argv[0][0] == '-': + return self.error() + + try: + self.commands[argv[0]](argv[1:], cmd) + except KeyError: + self.error('Invalid command: {0}'.format(argv[0])) + + return self.retcode + + def say(self, m, newline=True, file=None): + print(m, file=file or self.stdout, end='\n' if newline else '') + + def carp(self, m, newline=True, file=None): + return self.say(m, newline, file or self.stderr) + + def names(self, argv, cmd): + p = NamespacedOptionParser(argv) + self.say('\n'.join( + n.name for n in multi_args(p, cmd)), + ) + + def get(self, argv, cmd): + wanted = argv[0] + p = NamespacedOptionParser(argv[1:]) + for node in multi_args(p, cmd): + if node.name == wanted: + self.say(' '.join(node.argv)) + return + + def show(self, argv, cmd): + p = NamespacedOptionParser(argv) + self.with_detacher_default_options(p) + self.say('\n'.join( + ' '.join([sys.executable] + n.argv) for n in multi_args(p, cmd)), + ) + + def start(self, argv, cmd): + self.splash() + p = NamespacedOptionParser(argv) + self.with_detacher_default_options(p) + retcodes = [] + self.note('> Starting nodes...') + for node in multi_args(p, cmd): + self.note('\t> {0}: '.format(node.name), newline=False) + retcode = self.waitexec(node.argv, path=p.options['--executable']) + self.note(retcode and self.FAILED or self.OK) + retcodes.append(retcode) + self.retcode = int(any(retcodes)) + + def with_detacher_default_options(self, p): + _setdefaultopt(p.options, ['--pidfile', '-p'], '%N.pid') + _setdefaultopt(p.options, ['--logfile', '-f'], '%N.log') + p.options.setdefault( + '--cmd', + '-m {0}'.format(celery_exe('worker', '--detach')), + ) + _setdefaultopt(p.options, ['--executable'], sys.executable) + + def signal_node(self, nodename, pid, sig): + try: + os.kill(pid, sig) + except OSError as exc: + if exc.errno != errno.ESRCH: + raise + self.note('Could not signal {0} ({1}): No such process'.format( + nodename, pid)) + return False + return True + + def node_alive(self, pid): + try: + os.kill(pid, 0) + except OSError as exc: + if exc.errno == errno.ESRCH: + return False + raise + return True + + def shutdown_nodes(self, nodes, sig=signal.SIGTERM, retry=None, + callback=None): + if not nodes: + return + P = set(nodes) + + def on_down(node): + 
P.discard(node) + if callback: + callback(*node) + + self.note(self.colored.blue('> Stopping nodes...')) + for node in list(P): + if node in P: + nodename, _, pid = node + self.note('\t> {0}: {1} -> {2}'.format( + nodename, SIGMAP[sig][3:], pid)) + if not self.signal_node(nodename, pid, sig): + on_down(node) + + def note_waiting(): + left = len(P) + if left: + pids = ', '.join(str(pid) for _, _, pid in P) + self.note(self.colored.blue( + '> Waiting for {0} {1} -> {2}...'.format( + left, pluralize(left, 'node'), pids)), newline=False) + + if retry: + note_waiting() + its = 0 + while P: + for node in P: + its += 1 + self.note('.', newline=False) + nodename, _, pid = node + if not self.node_alive(pid): + self.note('\n\t> {0}: {1}'.format(nodename, self.OK)) + on_down(node) + note_waiting() + break + if P and not its % len(P): + sleep(float(retry)) + self.note('') + + def getpids(self, p, cmd, callback=None): + _setdefaultopt(p.options, ['--pidfile', '-p'], '%N.pid') + + nodes = [] + for node in multi_args(p, cmd): + try: + pidfile_template = _getopt( + p.namespaces[node.namespace], ['--pidfile', '-p'], + ) + except KeyError: + pidfile_template = _getopt(p.options, ['--pidfile', '-p']) + pid = None + pidfile = node.expander(pidfile_template) + try: + pid = Pidfile(pidfile).read_pid() + except ValueError: + pass + if pid: + nodes.append((node.name, tuple(node.argv), pid)) + else: + self.note('> {0.name}: {1}'.format(node, self.DOWN)) + if callback: + callback(node.name, node.argv, pid) + + return nodes + + def kill(self, argv, cmd): + self.splash() + p = NamespacedOptionParser(argv) + for nodename, _, pid in self.getpids(p, cmd): + self.note('Killing node {0} ({1})'.format(nodename, pid)) + self.signal_node(nodename, pid, signal.SIGKILL) + + def stop(self, argv, cmd, retry=None, callback=None): + self.splash() + p = NamespacedOptionParser(argv) + return self._stop_nodes(p, cmd, retry=retry, callback=callback) + + def _stop_nodes(self, p, cmd, retry=None, callback=None): + restargs = p.args[len(p.values):] + self.shutdown_nodes(self.getpids(p, cmd, callback=callback), + sig=findsig(restargs), + retry=retry, + callback=callback) + + def restart(self, argv, cmd): + self.splash() + p = NamespacedOptionParser(argv) + self.with_detacher_default_options(p) + retvals = [] + + def on_node_shutdown(nodename, argv, pid): + self.note(self.colored.blue( + '> Restarting node {0}: '.format(nodename)), newline=False) + retval = self.waitexec(argv, path=p.options['--executable']) + self.note(retval and self.FAILED or self.OK) + retvals.append(retval) + + self._stop_nodes(p, cmd, retry=2, callback=on_node_shutdown) + self.retval = int(any(retvals)) + + def stopwait(self, argv, cmd): + self.splash() + p = NamespacedOptionParser(argv) + self.with_detacher_default_options(p) + return self._stop_nodes(p, cmd, retry=2) + stop_verify = stopwait # compat + + def expand(self, argv, cmd=None): + template = argv[0] + p = NamespacedOptionParser(argv[1:]) + for node in multi_args(p, cmd): + self.say(node.expander(template)) + + def help(self, argv, cmd=None): + self.say(__doc__) + + def usage(self): + self.splash() + self.say(USAGE.format(prog_name=self.prog_name)) + + def splash(self): + if not self.nosplash: + c = self.colored + self.note(c.cyan('celery multi v{0}'.format(VERSION_BANNER))) + + def waitexec(self, argv, path=sys.executable): + args = ' '.join([path] + list(argv)) + argstr = shlex.split(from_utf8(args), posix=not IS_WINDOWS) + pipe = Popen(argstr, env=self.env) + self.info(' {0}'.format(' '.join(argstr))) + 
retcode = pipe.wait() + if retcode < 0: + self.note('* Child was terminated by signal {0}'.format(-retcode)) + return -retcode + elif retcode > 0: + self.note('* Child terminated with errorcode {0}'.format(retcode)) + return retcode + + def error(self, msg=None): + if msg: + self.carp(msg) + self.usage() + self.retcode = 1 + return 1 + + def info(self, msg, newline=True): + if self.verbose: + self.note(msg, newline=newline) + + def note(self, msg, newline=True): + if not self.quiet: + self.say(str(msg), newline=newline) + + @cached_property + def colored(self): + return term.colored(enabled=not self.no_color) + + @cached_property + def OK(self): + return str(self.colored.green('OK')) + + @cached_property + def FAILED(self): + return str(self.colored.red('FAILED')) + + @cached_property + def DOWN(self): + return str(self.colored.magenta('DOWN')) + + +def multi_args(p, cmd='celery worker', append='', prefix='', suffix=''): + names = p.values + options = dict(p.options) + passthrough = p.passthrough + ranges = len(names) == 1 + if ranges: + try: + noderange = int(names[0]) + except ValueError: + pass + else: + names = [str(n) for n in range(1, noderange + 1)] + prefix = 'celery' + cmd = options.pop('--cmd', cmd) + append = options.pop('--append', append) + hostname = options.pop('--hostname', + options.pop('-n', socket.gethostname())) + prefix = options.pop('--prefix', prefix) or '' + suffix = options.pop('--suffix', suffix) or hostname + if suffix in ('""', "''"): + suffix = '' + + for ns_name, ns_opts in list(items(p.namespaces)): + if ',' in ns_name or (ranges and '-' in ns_name): + for subns in parse_ns_range(ns_name, ranges): + p.namespaces[subns].update(ns_opts) + p.namespaces.pop(ns_name) + + # Numbers in args always refers to the index in the list of names. + # (e.g. `start foo bar baz -c:1` where 1 is foo, 2 is bar, and so on). 
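    # For example (made-up invocation), `celery multi start foo bar -c 3 -c:1 10`
    # is parsed by NamespacedOptionParser into options {'-c': '3'} plus the
    # namespace {'1': {'-c': '10'}}; the loop below folds namespace '1' into the
    # options for 'foo', so foo is started with -c 10 while bar keeps -c 3.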
+ for ns_name, ns_opts in list(items(p.namespaces)): + if ns_name.isdigit(): + ns_index = int(ns_name) - 1 + if ns_index < 0: + raise KeyError('Indexes start at 1 got: %r' % (ns_name, )) + try: + p.namespaces[names[ns_index]].update(ns_opts) + except IndexError: + raise KeyError('No node at index %r' % (ns_name, )) + + for name in names: + this_suffix = suffix + if '@' in name: + this_name = options['-n'] = name + nodename, this_suffix = nodesplit(name) + name = nodename + else: + nodename = '%s%s' % (prefix, name) + this_name = options['-n'] = '%s@%s' % (nodename, this_suffix) + expand = abbreviations({'%h': this_name, + '%n': name, + '%N': nodename, + '%d': this_suffix}) + argv = ([expand(cmd)] + + [format_opt(opt, expand(value)) + for opt, value in items(p.optmerge(name, options))] + + [passthrough]) + if append: + argv.append(expand(append)) + yield multi_args_t(this_name, argv, expand, name) + + +class NamespacedOptionParser(object): + + def __init__(self, args): + self.args = args + self.options = OrderedDict() + self.values = [] + self.passthrough = '' + self.namespaces = defaultdict(lambda: OrderedDict()) + + self.parse() + + def parse(self): + rargs = list(self.args) + pos = 0 + while pos < len(rargs): + arg = rargs[pos] + if arg == '--': + self.passthrough = ' '.join(rargs[pos:]) + break + elif arg[0] == '-': + if arg[1] == '-': + self.process_long_opt(arg[2:]) + else: + value = None + if len(rargs) > pos + 1 and rargs[pos + 1][0] != '-': + value = rargs[pos + 1] + pos += 1 + self.process_short_opt(arg[1:], value) + else: + self.values.append(arg) + pos += 1 + + def process_long_opt(self, arg, value=None): + if '=' in arg: + arg, value = arg.split('=', 1) + self.add_option(arg, value, short=False) + + def process_short_opt(self, arg, value=None): + self.add_option(arg, value, short=True) + + def optmerge(self, ns, defaults=None): + if defaults is None: + defaults = self.options + return OrderedDict(defaults, **self.namespaces[ns]) + + def add_option(self, name, value, short=False, ns=None): + prefix = short and '-' or '--' + dest = self.options + if ':' in name: + name, ns = name.split(':') + dest = self.namespaces[ns] + dest[prefix + name] = value + + +def quote(v): + return "\\'".join("'" + p + "'" for p in v.split("'")) + + +def format_opt(opt, value): + if not value: + return opt + if opt.startswith('--'): + return '{0}={1}'.format(opt, value) + return '{0} {1}'.format(opt, value) + + +def parse_ns_range(ns, ranges=False): + ret = [] + for space in ',' in ns and ns.split(',') or [ns]: + if ranges and '-' in space: + start, stop = space.split('-') + ret.extend( + str(n) for n in range(int(start), int(stop) + 1) + ) + else: + ret.append(space) + return ret + + +def abbreviations(mapping): + + def expand(S): + ret = S + if S is not None: + for short_opt, long_opt in items(mapping): + ret = ret.replace(short_opt, long_opt) + return ret + + return expand + + +def findsig(args, default=signal.SIGTERM): + for arg in reversed(args): + if len(arg) == 2 and arg[0] == '-': + try: + return int(arg[1]) + except ValueError: + pass + if arg[0] == '-': + maybe_sig = 'SIG' + arg[1:] + if maybe_sig in SIGNAMES: + return getattr(signal, maybe_sig) + return default + + +def _getopt(d, alt): + for opt in alt: + try: + return d[opt] + except KeyError: + pass + raise KeyError(alt[0]) + + +def _setdefaultopt(d, alt, value): + for opt in alt[1:]: + try: + return d[opt] + except KeyError: + pass + return d.setdefault(alt[0], value) + + +if __name__ == '__main__': # pragma: no cover + main() diff --git 
a/thesisenv/lib/python3.6/site-packages/celery/bin/worker.py b/thesisenv/lib/python3.6/site-packages/celery/bin/worker.py new file mode 100644 index 0000000..dc04075 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/bin/worker.py @@ -0,0 +1,270 @@ +# -*- coding: utf-8 -*- +""" + +The :program:`celery worker` command (previously known as ``celeryd``) + +.. program:: celery worker + +.. seealso:: + + See :ref:`preload-options`. + +.. cmdoption:: -c, --concurrency + + Number of child processes processing the queue. The default + is the number of CPUs available on your system. + +.. cmdoption:: -P, --pool + + Pool implementation: + + prefork (default), eventlet, gevent, solo or threads. + +.. cmdoption:: -f, --logfile + + Path to log file. If no logfile is specified, `stderr` is used. + +.. cmdoption:: -l, --loglevel + + Logging level, choose between `DEBUG`, `INFO`, `WARNING`, + `ERROR`, `CRITICAL`, or `FATAL`. + +.. cmdoption:: -n, --hostname + + Set custom hostname, e.g. 'w1.%h'. Expands: %h (hostname), + %n (name) and %d, (domain). + +.. cmdoption:: -B, --beat + + Also run the `celery beat` periodic task scheduler. Please note that + there must only be one instance of this service. + +.. cmdoption:: -Q, --queues + + List of queues to enable for this worker, separated by comma. + By default all configured queues are enabled. + Example: `-Q video,image` + +.. cmdoption:: -I, --include + + Comma separated list of additional modules to import. + Example: -I foo.tasks,bar.tasks + +.. cmdoption:: -s, --schedule + + Path to the schedule database if running with the `-B` option. + Defaults to `celerybeat-schedule`. The extension ".db" may be + appended to the filename. + +.. cmdoption:: -O + + Apply optimization profile. Supported: default, fair + +.. cmdoption:: --scheduler + + Scheduler class to use. Default is celery.beat.PersistentScheduler + +.. cmdoption:: -S, --statedb + + Path to the state database. The extension '.db' may + be appended to the filename. Default: {default} + +.. cmdoption:: -E, --events + + Send events that can be captured by monitors like :program:`celery events`, + `celerymon`, and others. + +.. cmdoption:: --without-gossip + + Do not subscribe to other workers events. + +.. cmdoption:: --without-mingle + + Do not synchronize with other workers at startup. + +.. cmdoption:: --without-heartbeat + + Do not send event heartbeats. + +.. cmdoption:: --heartbeat-interval + + Interval in seconds at which to send worker heartbeat + +.. cmdoption:: --purge + + Purges all waiting tasks before the daemon is started. + **WARNING**: This is unrecoverable, and the tasks will be + deleted from the messaging server. + +.. cmdoption:: --time-limit + + Enables a hard time limit (in seconds int/float) for tasks. + +.. cmdoption:: --soft-time-limit + + Enables a soft time limit (in seconds int/float) for tasks. + +.. cmdoption:: --maxtasksperchild + + Maximum number of tasks a pool worker can execute before it's + terminated and replaced by a new worker. + +.. cmdoption:: --pidfile + + Optional file used to store the workers pid. + + The worker will not start if this file already exists + and the pid is still alive. + +.. cmdoption:: --autoscale + + Enable autoscaling by providing + max_concurrency, min_concurrency. Example:: + + --autoscale=10,3 + + (always keep 3 processes, but grow to 10 if necessary) + +.. cmdoption:: --autoreload + + Enable autoreloading. + +.. cmdoption:: --no-execv + + Don't do execv after multiprocessing child fork. 
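For orientation, a representative invocation that combines several of the options documented above might look like the following; the project module ``proj`` and the schedule path are placeholders for this illustration, not values taken from the patch::

    celery worker -A proj -l info -c 4 -Q default,email -B -s /tmp/celerybeat-schedule --time-limit=300

As noted under ``-B``, only one node should run the embedded beat scheduler; any additional worker nodes would omit that flag.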
+ +""" +from __future__ import absolute_import, unicode_literals + +import sys + +from celery import concurrency +from celery.bin.base import Command, Option, daemon_options +from celery.bin.celeryd_detach import detached_celeryd +from celery.five import string_t +from celery.platforms import maybe_drop_privileges +from celery.utils import default_nodename +from celery.utils.log import LOG_LEVELS, mlevel + +__all__ = ['worker', 'main'] + +__MODULE_DOC__ = __doc__ + + +class worker(Command): + """Start worker instance. + + Examples:: + + celery worker --app=proj -l info + celery worker -A proj -l info -Q hipri,lopri + + celery worker -A proj --concurrency=4 + celery worker -A proj --concurrency=1000 -P eventlet + + celery worker --autoscale=10,0 + """ + doc = __MODULE_DOC__ # parse help from this too + namespace = 'celeryd' + enable_config_from_cmdline = True + supports_args = False + + def run_from_argv(self, prog_name, argv=None, command=None): + command = sys.argv[0] if command is None else command + argv = sys.argv[1:] if argv is None else argv + # parse options before detaching so errors can be handled. + options, args = self.prepare_args( + *self.parse_options(prog_name, argv, command)) + self.maybe_detach([command] + argv) + return self(*args, **options) + + def maybe_detach(self, argv, dopts=['-D', '--detach']): + if any(arg in argv for arg in dopts): + argv = [v for v in argv if v not in dopts] + # will never return + detached_celeryd(self.app).execute_from_commandline(argv) + raise SystemExit(0) + + def run(self, hostname=None, pool_cls=None, app=None, uid=None, gid=None, + loglevel=None, logfile=None, pidfile=None, state_db=None, + **kwargs): + maybe_drop_privileges(uid=uid, gid=gid) + # Pools like eventlet/gevent needs to patch libs as early + # as possible. + pool_cls = (concurrency.get_implementation(pool_cls) or + self.app.conf.CELERYD_POOL) + if self.app.IS_WINDOWS and kwargs.get('beat'): + self.die('-B option does not work on Windows. ' + 'Please run celery beat as a separate service.') + hostname = self.host_format(default_nodename(hostname)) + if loglevel: + try: + loglevel = mlevel(loglevel) + except KeyError: # pragma: no cover + self.die('Unknown level {0!r}. Please use one of {1}.'.format( + loglevel, '|'.join( + l for l in LOG_LEVELS if isinstance(l, string_t)))) + + return self.app.Worker( + hostname=hostname, pool_cls=pool_cls, loglevel=loglevel, + logfile=logfile, # node format handled by celery.app.log.setup + pidfile=self.node_format(pidfile, hostname), + state_db=self.node_format(state_db, hostname), **kwargs + ).start() + + def with_pool_option(self, argv): + # this command support custom pools + # that may have to be loaded as early as possible. 
+ return (['-P'], ['--pool']) + + def get_options(self): + conf = self.app.conf + return ( + Option('-c', '--concurrency', + default=conf.CELERYD_CONCURRENCY, type='int'), + Option('-P', '--pool', default=conf.CELERYD_POOL, dest='pool_cls'), + Option('--purge', '--discard', default=False, action='store_true'), + Option('-l', '--loglevel', default=conf.CELERYD_LOG_LEVEL), + Option('-n', '--hostname'), + Option('-B', '--beat', action='store_true'), + Option('-s', '--schedule', dest='schedule_filename', + default=conf.CELERYBEAT_SCHEDULE_FILENAME), + Option('--scheduler', dest='scheduler_cls'), + Option('-S', '--statedb', + default=conf.CELERYD_STATE_DB, dest='state_db'), + Option('-E', '--events', default=conf.CELERY_SEND_EVENTS, + action='store_true', dest='send_events'), + Option('--time-limit', type='float', dest='task_time_limit', + default=conf.CELERYD_TASK_TIME_LIMIT), + Option('--soft-time-limit', dest='task_soft_time_limit', + default=conf.CELERYD_TASK_SOFT_TIME_LIMIT, type='float'), + Option('--maxtasksperchild', dest='max_tasks_per_child', + default=conf.CELERYD_MAX_TASKS_PER_CHILD, type='int'), + Option('--queues', '-Q', default=[]), + Option('--exclude-queues', '-X', default=[]), + Option('--include', '-I', default=[]), + Option('--autoscale'), + Option('--autoreload', action='store_true'), + Option('--no-execv', action='store_true', default=False), + Option('--without-gossip', action='store_true', default=False), + Option('--without-mingle', action='store_true', default=False), + Option('--without-heartbeat', action='store_true', default=False), + Option('--heartbeat-interval', type='int'), + Option('-O', dest='optimization'), + Option('-D', '--detach', action='store_true'), + ) + daemon_options() + tuple(self.app.user_options['worker']) + + +def main(app=None): + # Fix for setuptools generated scripts, so that it will + # work with multiprocessing fork emulation. + # (see multiprocessing.forking.get_preparation_data()) + if __name__ != '__main__': # pragma: no cover + sys.modules['__main__'] = sys.modules[__name__] + from billiard import freeze_support + freeze_support() + worker(app=app).execute_from_commandline() + + +if __name__ == '__main__': # pragma: no cover + main() diff --git a/thesisenv/lib/python3.6/site-packages/celery/bootsteps.py b/thesisenv/lib/python3.6/site-packages/celery/bootsteps.py new file mode 100644 index 0000000..4471a4c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/bootsteps.py @@ -0,0 +1,422 @@ +# -*- coding: utf-8 -*- +""" + celery.bootsteps + ~~~~~~~~~~~~~~~~ + + A directed acyclic graph of reusable components. 
+ +""" +from __future__ import absolute_import, unicode_literals + +from collections import deque +from threading import Event + +from kombu.common import ignore_errors +from kombu.utils import symbol_by_name + +from .datastructures import DependencyGraph, GraphFormatter +from .five import values, with_metaclass +from .utils.imports import instantiate, qualname +from .utils.log import get_logger + +try: + from greenlet import GreenletExit + IGNORE_ERRORS = (GreenletExit, ) +except ImportError: # pragma: no cover + IGNORE_ERRORS = () + +__all__ = ['Blueprint', 'Step', 'StartStopStep', 'ConsumerStep'] + +#: States +RUN = 0x1 +CLOSE = 0x2 +TERMINATE = 0x3 + +logger = get_logger(__name__) +debug = logger.debug + + +def _pre(ns, fmt): + return '| {0}: {1}'.format(ns.alias, fmt) + + +def _label(s): + return s.name.rsplit('.', 1)[-1] + + +class StepFormatter(GraphFormatter): + """Graph formatter for :class:`Blueprint`.""" + + blueprint_prefix = '⧉' + conditional_prefix = '∘' + blueprint_scheme = { + 'shape': 'parallelogram', + 'color': 'slategray4', + 'fillcolor': 'slategray3', + } + + def label(self, step): + return step and '{0}{1}'.format( + self._get_prefix(step), + (step.label or _label(step)).encode('utf-8', 'ignore'), + ) + + def _get_prefix(self, step): + if step.last: + return self.blueprint_prefix + if step.conditional: + return self.conditional_prefix + return '' + + def node(self, obj, **attrs): + scheme = self.blueprint_scheme if obj.last else self.node_scheme + return self.draw_node(obj, scheme, attrs) + + def edge(self, a, b, **attrs): + if a.last: + attrs.update(arrowhead='none', color='darkseagreen3') + return self.draw_edge(a, b, self.edge_scheme, attrs) + + +class Blueprint(object): + """Blueprint containing bootsteps that can be applied to objects. + + :keyword steps: List of steps. + :keyword name: Set explicit name for this blueprint. + :keyword app: Set the Celery app for this blueprint. + :keyword on_start: Optional callback applied after blueprint start. + :keyword on_close: Optional callback applied before blueprint close. + :keyword on_stopped: Optional callback applied after blueprint stopped. 
+ + """ + GraphFormatter = StepFormatter + + name = None + state = None + started = 0 + default_steps = set() + state_to_name = { + 0: 'initializing', + RUN: 'running', + CLOSE: 'closing', + TERMINATE: 'terminating', + } + + def __init__(self, steps=None, name=None, app=None, + on_start=None, on_close=None, on_stopped=None): + self.app = app + self.name = name or self.name or qualname(type(self)) + self.types = set(steps or []) | set(self.default_steps) + self.on_start = on_start + self.on_close = on_close + self.on_stopped = on_stopped + self.shutdown_complete = Event() + self.steps = {} + + def start(self, parent): + self.state = RUN + if self.on_start: + self.on_start() + for i, step in enumerate(s for s in parent.steps if s is not None): + self._debug('Starting %s', step.alias) + self.started = i + 1 + step.start(parent) + debug('^-- substep ok') + + def human_state(self): + return self.state_to_name[self.state or 0] + + def info(self, parent): + info = {} + for step in parent.steps: + info.update(step.info(parent) or {}) + return info + + def close(self, parent): + if self.on_close: + self.on_close() + self.send_all(parent, 'close', 'closing', reverse=False) + + def restart(self, parent, method='stop', + description='restarting', propagate=False): + self.send_all(parent, method, description, propagate=propagate) + + def send_all(self, parent, method, + description=None, reverse=True, propagate=True, args=()): + description = description or method.replace('_', ' ') + steps = reversed(parent.steps) if reverse else parent.steps + for step in steps: + if step: + fun = getattr(step, method, None) + if fun is not None: + self._debug('%s %s...', + description.capitalize(), step.alias) + try: + fun(parent, *args) + except Exception as exc: + if propagate: + raise + logger.error( + 'Error on %s %s: %r', + description, step.alias, exc, exc_info=1, + ) + + def stop(self, parent, close=True, terminate=False): + what = 'terminating' if terminate else 'stopping' + if self.state in (CLOSE, TERMINATE): + return + + if self.state != RUN or self.started != len(parent.steps): + # Not fully started, can safely exit. + self.state = TERMINATE + self.shutdown_complete.set() + return + self.close(parent) + self.state = CLOSE + + self.restart( + parent, 'terminate' if terminate else 'stop', + description=what, propagate=False, + ) + + if self.on_stopped: + self.on_stopped() + self.state = TERMINATE + self.shutdown_complete.set() + + def join(self, timeout=None): + try: + # Will only get here if running green, + # makes sure all greenthreads have exited. + self.shutdown_complete.wait(timeout=timeout) + except IGNORE_ERRORS: + pass + + def apply(self, parent, **kwargs): + """Apply the steps in this blueprint to an object. + + This will apply the ``__init__`` and ``include`` methods + of each step, with the object as argument:: + + step = Step(obj) + ... + step.include(obj) + + For :class:`StartStopStep` the services created + will also be added to the objects ``steps`` attribute. 
+ + """ + self._debug('Preparing bootsteps.') + order = self.order = [] + steps = self.steps = self.claim_steps() + + self._debug('Building graph...') + for S in self._finalize_steps(steps): + step = S(parent, **kwargs) + steps[step.name] = step + order.append(step) + self._debug('New boot order: {%s}', + ', '.join(s.alias for s in self.order)) + for step in order: + step.include(parent) + return self + + def connect_with(self, other): + self.graph.adjacent.update(other.graph.adjacent) + self.graph.add_edge(type(other.order[0]), type(self.order[-1])) + + def __getitem__(self, name): + return self.steps[name] + + def _find_last(self): + return next((C for C in values(self.steps) if C.last), None) + + def _firstpass(self, steps): + for step in values(steps): + step.requires = [symbol_by_name(dep) for dep in step.requires] + stream = deque(step.requires for step in values(steps)) + while stream: + for node in stream.popleft(): + node = symbol_by_name(node) + if node.name not in self.steps: + steps[node.name] = node + stream.append(node.requires) + + def _finalize_steps(self, steps): + last = self._find_last() + self._firstpass(steps) + it = ((C, C.requires) for C in values(steps)) + G = self.graph = DependencyGraph( + it, formatter=self.GraphFormatter(root=last), + ) + if last: + for obj in G: + if obj != last: + G.add_edge(last, obj) + try: + return G.topsort() + except KeyError as exc: + raise KeyError('unknown bootstep: %s' % exc) + + def claim_steps(self): + return dict(self.load_step(step) for step in self._all_steps()) + + def _all_steps(self): + return self.types | self.app.steps[self.name.lower()] + + def load_step(self, step): + step = symbol_by_name(step) + return step.name, step + + def _debug(self, msg, *args): + return debug(_pre(self, msg), *args) + + @property + def alias(self): + return _label(self) + + +class StepType(type): + """Metaclass for steps.""" + + def __new__(cls, name, bases, attrs): + module = attrs.get('__module__') + qname = '{0}.{1}'.format(module, name) if module else name + attrs.update( + __qualname__=qname, + name=attrs.get('name') or qname, + ) + return super(StepType, cls).__new__(cls, name, bases, attrs) + + def __str__(self): + return self.name + + def __repr__(self): + return 'step:{0.name}{{{0.requires!r}}}'.format(self) + + +@with_metaclass(StepType) +class Step(object): + """A Bootstep. + + The :meth:`__init__` method is called when the step + is bound to a parent object, and can as such be used + to initialize attributes in the parent object at + parent instantiation-time. + + """ + + #: Optional step name, will use qualname if not specified. + name = None + + #: Optional short name used for graph outputs and in logs. + label = None + + #: Set this to true if the step is enabled based on some condition. + conditional = False + + #: List of other steps that that must be started before this step. + #: Note that all dependencies must be in the same blueprint. + requires = () + + #: This flag is reserved for the workers Consumer, + #: since it is required to always be started last. + #: There can only be one object marked last + #: in every blueprint. + last = False + + #: This provides the default for :meth:`include_if`. 
+ enabled = True + + def __init__(self, parent, **kwargs): + pass + + def include_if(self, parent): + """An optional predicate that decides whether this + step should be created.""" + return self.enabled + + def instantiate(self, name, *args, **kwargs): + return instantiate(name, *args, **kwargs) + + def _should_include(self, parent): + if self.include_if(parent): + return True, self.create(parent) + return False, None + + def include(self, parent): + return self._should_include(parent)[0] + + def create(self, parent): + """Create the step.""" + pass + + def __repr__(self): + return ''.format(self) + + @property + def alias(self): + return self.label or _label(self) + + def info(self, obj): + pass + + +class StartStopStep(Step): + + #: Optional obj created by the :meth:`create` method. + #: This is used by :class:`StartStopStep` to keep the + #: original service object. + obj = None + + def start(self, parent): + if self.obj: + return self.obj.start() + + def stop(self, parent): + if self.obj: + return self.obj.stop() + + def close(self, parent): + pass + + def terminate(self, parent): + if self.obj: + return getattr(self.obj, 'terminate', self.obj.stop)() + + def include(self, parent): + inc, ret = self._should_include(parent) + if inc: + self.obj = ret + parent.steps.append(self) + return inc + + +class ConsumerStep(StartStopStep): + requires = ('celery.worker.consumer:Connection', ) + consumers = None + + def get_consumers(self, channel): + raise NotImplementedError('missing get_consumers') + + def start(self, c): + channel = c.connection.channel() + self.consumers = self.get_consumers(channel) + for consumer in self.consumers or []: + consumer.consume() + + def stop(self, c): + self._close(c, True) + + def shutdown(self, c): + self._close(c, False) + + def _close(self, c, cancel_consumers=True): + channels = set() + for consumer in self.consumers or []: + if cancel_consumers: + ignore_errors(c.connection, consumer.cancel) + if consumer.channel: + channels.add(consumer.channel) + for channel in channels: + ignore_errors(c.connection, channel.close) diff --git a/thesisenv/lib/python3.6/site-packages/celery/canvas.py b/thesisenv/lib/python3.6/site-packages/celery/canvas.py new file mode 100644 index 0000000..4149e39 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/canvas.py @@ -0,0 +1,698 @@ +# -*- coding: utf-8 -*- +""" + celery.canvas + ~~~~~~~~~~~~~ + + Composing task workflows. + + Documentation for some of these types are in :mod:`celery`. + You should import these from :mod:`celery` and not this module. + + +""" +from __future__ import absolute_import + +from collections import MutableSequence +from copy import deepcopy +from functools import partial as _partial, reduce +from operator import itemgetter +from itertools import chain as _chain + +from kombu.utils import cached_property, fxrange, kwdict, reprcall, uuid + +from celery._state import current_app +from celery.utils.functional import ( + maybe_list, is_list, regen, + chunks as _chunks, +) +from celery.utils.text import truncate + +__all__ = ['Signature', 'chain', 'xmap', 'xstarmap', 'chunks', + 'group', 'chord', 'signature', 'maybe_signature'] + + +class _getitem_property(object): + """Attribute -> dict key descriptor. + + The target object must support ``__getitem__``, + and optionally ``__setitem__``. + + Example: + + >>> from collections import defaultdict + + >>> class Me(dict): + ... deep = defaultdict(dict) + ... + ... foo = _getitem_property('foo') + ... 
deep_thing = _getitem_property('deep.thing') + + + >>> me = Me() + >>> me.foo + None + + >>> me.foo = 10 + >>> me.foo + 10 + >>> me['foo'] + 10 + + >>> me.deep_thing = 42 + >>> me.deep_thing + 42 + >>> me.deep + defaultdict(, {'thing': 42}) + + """ + + def __init__(self, keypath): + path, _, self.key = keypath.rpartition('.') + self.path = path.split('.') if path else None + + def _path(self, obj): + return (reduce(lambda d, k: d[k], [obj] + self.path) if self.path + else obj) + + def __get__(self, obj, type=None): + if obj is None: + return type + return self._path(obj).get(self.key) + + def __set__(self, obj, value): + self._path(obj)[self.key] = value + + +def maybe_unroll_group(g): + """Unroll group with only one member.""" + # Issue #1656 + try: + size = len(g.tasks) + except TypeError: + try: + size = g.tasks.__length_hint__() + except (AttributeError, TypeError): + pass + else: + return list(g.tasks)[0] if size == 1 else g + else: + return g.tasks[0] if size == 1 else g + + +def _upgrade(fields, sig): + """Used by custom signatures in .from_dict, to keep common fields.""" + sig.update(chord_size=fields.get('chord_size')) + return sig + + +class Signature(dict): + """Class that wraps the arguments and execution options + for a single task invocation. + + Used as the parts in a :class:`group` and other constructs, + or to pass tasks around as callbacks while being compatible + with serializers with a strict type subset. + + :param task: Either a task class/instance, or the name of a task. + :keyword args: Positional arguments to apply. + :keyword kwargs: Keyword arguments to apply. + :keyword options: Additional options to :meth:`Task.apply_async`. + + Note that if the first argument is a :class:`dict`, the other + arguments will be ignored and the values in the dict will be used + instead. + + >>> s = signature('tasks.add', args=(2, 2)) + >>> signature(s) + {'task': 'tasks.add', args=(2, 2), kwargs={}, options={}} + + """ + TYPES = {} + _app = _type = None + + @classmethod + def register_type(cls, subclass, name=None): + cls.TYPES[name or subclass.__name__] = subclass + return subclass + + @classmethod + def from_dict(self, d, app=None): + typ = d.get('subtask_type') + if typ: + return self.TYPES[typ].from_dict(kwdict(d), app=app) + return Signature(d, app=app) + + def __init__(self, task=None, args=None, kwargs=None, options=None, + type=None, subtask_type=None, immutable=False, + app=None, **ex): + self._app = app + init = dict.__init__ + + if isinstance(task, dict): + return init(self, task) # works like dict(d) + + # Also supports using task class/instance instead of string name. + try: + task_name = task.name + except AttributeError: + task_name = task + else: + self._type = task + + init(self, + task=task_name, args=tuple(args or ()), + kwargs=kwargs or {}, + options=dict(options or {}, **ex), + subtask_type=subtask_type, + immutable=immutable, + chord_size=None) + + def __call__(self, *partial_args, **partial_kwargs): + args, kwargs, _ = self._merge(partial_args, partial_kwargs, None) + return self.type(*args, **kwargs) + + def delay(self, *partial_args, **partial_kwargs): + return self.apply_async(partial_args, partial_kwargs) + + def apply(self, args=(), kwargs={}, **options): + """Apply this task locally.""" + # For callbacks: extra args are prepended to the stored args. 
+ args, kwargs, options = self._merge(args, kwargs, options) + return self.type.apply(args, kwargs, **options) + + def _merge(self, args=(), kwargs={}, options={}): + if self.immutable: + return (self.args, self.kwargs, + dict(self.options, **options) if options else self.options) + return (tuple(args) + tuple(self.args) if args else self.args, + dict(self.kwargs, **kwargs) if kwargs else self.kwargs, + dict(self.options, **options) if options else self.options) + + def clone(self, args=(), kwargs={}, app=None, **opts): + # need to deepcopy options so origins links etc. is not modified. + if args or kwargs or opts: + args, kwargs, opts = self._merge(args, kwargs, opts) + else: + args, kwargs, opts = self.args, self.kwargs, self.options + s = Signature.from_dict({'task': self.task, 'args': tuple(args), + 'kwargs': kwargs, 'options': deepcopy(opts), + 'subtask_type': self.subtask_type, + 'chord_size': self.chord_size, + 'immutable': self.immutable}, + app=app or self._app) + s._type = self._type + return s + partial = clone + + def freeze(self, _id=None, group_id=None, chord=None): + opts = self.options + try: + tid = opts['task_id'] + except KeyError: + tid = opts['task_id'] = _id or uuid() + if 'reply_to' not in opts: + opts['reply_to'] = self.app.oid + if group_id: + opts['group_id'] = group_id + if chord: + opts['chord'] = chord + return self.app.AsyncResult(tid) + _freeze = freeze + + def replace(self, args=None, kwargs=None, options=None): + s = self.clone() + if args is not None: + s.args = args + if kwargs is not None: + s.kwargs = kwargs + if options is not None: + s.options = options + return s + + def set(self, immutable=None, **options): + if immutable is not None: + self.set_immutable(immutable) + self.options.update(options) + return self + + def set_immutable(self, immutable): + self.immutable = immutable + + def apply_async(self, args=(), kwargs={}, **options): + try: + _apply = self._apply_async + except IndexError: # no tasks for chain, etc to find type + return + # For callbacks: extra args are prepended to the stored args. 
+ if args or kwargs or options: + args, kwargs, options = self._merge(args, kwargs, options) + else: + args, kwargs, options = self.args, self.kwargs, self.options + return _apply(args, kwargs, **options) + + def append_to_list_option(self, key, value): + items = self.options.setdefault(key, []) + if not isinstance(items, MutableSequence): + items = self.options[key] = [items] + if value not in items: + items.append(value) + return value + + def link(self, callback): + return self.append_to_list_option('link', callback) + + def link_error(self, errback): + return self.append_to_list_option('link_error', errback) + + def flatten_links(self): + return list(_chain.from_iterable(_chain( + [[self]], + (link.flatten_links() + for link in maybe_list(self.options.get('link')) or []) + ))) + + def __or__(self, other): + if isinstance(other, group): + other = maybe_unroll_group(other) + if not isinstance(self, chain) and isinstance(other, chain): + return chain((self, ) + other.tasks, app=self._app) + elif isinstance(other, chain): + return chain(*self.tasks + other.tasks, app=self._app) + elif isinstance(other, Signature): + if isinstance(self, chain): + return chain(*self.tasks + (other, ), app=self._app) + return chain(self, other, app=self._app) + return NotImplemented + + def __deepcopy__(self, memo): + memo[id(self)] = self + return dict(self) + + def __invert__(self): + return self.apply_async().get() + + def __reduce__(self): + # for serialization, the task type is lazily loaded, + # and not stored in the dict itself. + return subtask, (dict(self), ) + + def reprcall(self, *args, **kwargs): + args, kwargs, _ = self._merge(args, kwargs, {}) + return reprcall(self['task'], args, kwargs) + + def election(self): + type = self.type + app = type.app + tid = self.options.get('task_id') or uuid() + + with app.producer_or_acquire(None) as P: + props = type.backend.on_task_call(P, tid) + app.control.election(tid, 'task', self.clone(task_id=tid, **props), + connection=P.connection) + return type.AsyncResult(tid) + + def __repr__(self): + return self.reprcall() + + @cached_property + def type(self): + return self._type or self.app.tasks[self['task']] + + @cached_property + def app(self): + return self._app or current_app + + @cached_property + def AsyncResult(self): + try: + return self.type.AsyncResult + except KeyError: # task not registered + return self.app.AsyncResult + + @cached_property + def _apply_async(self): + try: + return self.type.apply_async + except KeyError: + return _partial(self.app.send_task, self['task']) + id = _getitem_property('options.task_id') + task = _getitem_property('task') + args = _getitem_property('args') + kwargs = _getitem_property('kwargs') + options = _getitem_property('options') + subtask_type = _getitem_property('subtask_type') + chord_size = _getitem_property('chord_size') + immutable = _getitem_property('immutable') + + +@Signature.register_type +class chain(Signature): + + def __init__(self, *tasks, **options): + tasks = (regen(tasks[0]) if len(tasks) == 1 and is_list(tasks[0]) + else tasks) + Signature.__init__( + self, 'celery.chain', (), {'tasks': tasks}, **options + ) + self.tasks = tasks + self.subtask_type = 'chain' + + def __call__(self, *args, **kwargs): + if self.tasks: + return self.apply_async(args, kwargs) + + @classmethod + def from_dict(self, d, app=None): + tasks = [maybe_signature(t, app=app) for t in d['kwargs']['tasks']] + if d['args'] and tasks: + # partial args passed on to first task in chain (Issue #1057). 
+ tasks[0]['args'] = tasks[0]._merge(d['args'])[0] + return _upgrade(d, chain(*tasks, app=app, **d['options'])) + + @property + def type(self): + try: + return self._type or self.tasks[0].type.app.tasks['celery.chain'] + except KeyError: + return self.app.tasks['celery.chain'] + + def __repr__(self): + return ' | '.join(repr(t) for t in self.tasks) + + +class _basemap(Signature): + _task_name = None + _unpack_args = itemgetter('task', 'it') + + def __init__(self, task, it, **options): + Signature.__init__( + self, self._task_name, (), + {'task': task, 'it': regen(it)}, immutable=True, **options + ) + + def apply_async(self, args=(), kwargs={}, **opts): + # need to evaluate generators + task, it = self._unpack_args(self.kwargs) + return self.type.apply_async( + (), {'task': task, 'it': list(it)}, **opts + ) + + @classmethod + def from_dict(cls, d, app=None): + return _upgrade( + d, cls(*cls._unpack_args(d['kwargs']), app=app, **d['options']), + ) + + +@Signature.register_type +class xmap(_basemap): + _task_name = 'celery.map' + + def __repr__(self): + task, it = self._unpack_args(self.kwargs) + return '[{0}(x) for x in {1}]'.format(task.task, + truncate(repr(it), 100)) + + +@Signature.register_type +class xstarmap(_basemap): + _task_name = 'celery.starmap' + + def __repr__(self): + task, it = self._unpack_args(self.kwargs) + return '[{0}(*x) for x in {1}]'.format(task.task, + truncate(repr(it), 100)) + + +@Signature.register_type +class chunks(Signature): + _unpack_args = itemgetter('task', 'it', 'n') + + def __init__(self, task, it, n, **options): + Signature.__init__( + self, 'celery.chunks', (), + {'task': task, 'it': regen(it), 'n': n}, + immutable=True, **options + ) + + @classmethod + def from_dict(self, d, app=None): + return _upgrade( + d, chunks(*self._unpack_args( + d['kwargs']), app=app, **d['options']), + ) + + def apply_async(self, args=(), kwargs={}, **opts): + return self.group().apply_async(args, kwargs, **opts) + + def __call__(self, **options): + return self.group()(**options) + + def group(self): + # need to evaluate generators + task, it, n = self._unpack_args(self.kwargs) + return group((xstarmap(task, part, app=self._app) + for part in _chunks(iter(it), n)), + app=self._app) + + @classmethod + def apply_chunks(cls, task, it, n, app=None): + return cls(task, it, n, app=app)() + + +def _maybe_group(tasks): + if isinstance(tasks, group): + tasks = list(tasks.tasks) + elif isinstance(tasks, Signature): + tasks = [tasks] + else: + tasks = regen(tasks) + return tasks + + +def _maybe_clone(tasks, app): + return [s.clone() if isinstance(s, Signature) else signature(s, app=app) + for s in tasks] + + +@Signature.register_type +class group(Signature): + + def __init__(self, *tasks, **options): + if len(tasks) == 1: + tasks = _maybe_group(tasks[0]) + Signature.__init__( + self, 'celery.group', (), {'tasks': tasks}, **options + ) + self.tasks, self.subtask_type = tasks, 'group' + + @classmethod + def from_dict(self, d, app=None): + tasks = [maybe_signature(t, app=app) for t in d['kwargs']['tasks']] + if d['args'] and tasks: + # partial args passed on to all tasks in the group (Issue #1057). 
+ for task in tasks: + task['args'] = task._merge(d['args'])[0] + return _upgrade(d, group(tasks, app=app, **kwdict(d['options']))) + + def apply_async(self, args=(), kwargs=None, add_to_parent=True, **options): + tasks = _maybe_clone(self.tasks, app=self._app) + if not tasks: + return self.freeze() + type = self.type + return type(*type.prepare(dict(self.options, **options), tasks, args), + add_to_parent=add_to_parent) + + def set_immutable(self, immutable): + for task in self.tasks: + task.set_immutable(immutable) + + def link(self, sig): + # Simply link to first task + sig = sig.clone().set(immutable=True) + return self.tasks[0].link(sig) + + def link_error(self, sig): + sig = sig.clone().set(immutable=True) + return self.tasks[0].link_error(sig) + + def apply(self, *args, **kwargs): + if not self.tasks: + return self.freeze() # empty group returns GroupResult + return Signature.apply(self, *args, **kwargs) + + def __call__(self, *partial_args, **options): + return self.apply_async(partial_args, **options) + + def freeze(self, _id=None, group_id=None, chord=None): + opts = self.options + try: + gid = opts['task_id'] + except KeyError: + gid = opts['task_id'] = uuid() + if group_id: + opts['group_id'] = group_id + if chord: + opts['chord'] = group_id + new_tasks, results = [], [] + for task in self.tasks: + task = maybe_signature(task, app=self._app).clone() + results.append(task.freeze(group_id=group_id, chord=chord)) + new_tasks.append(task) + self.tasks = self.kwargs['tasks'] = new_tasks + return self.app.GroupResult(gid, results) + _freeze = freeze + + def skew(self, start=1.0, stop=None, step=1.0): + it = fxrange(start, stop, step, repeatlast=True) + for task in self.tasks: + task.set(countdown=next(it)) + return self + + def __iter__(self): + return iter(self.tasks) + + def __repr__(self): + return repr(self.tasks) + + @property + def app(self): + return self._app or (self.tasks[0].app if self.tasks else current_app) + + @property + def type(self): + if self._type: + return self._type + # taking the app from the first task in the list, there may be a + # better solution for this, e.g. to consolidate tasks with the same + # app and apply them in batches. + return self.app.tasks[self['task']] + + +@Signature.register_type +class chord(Signature): + + def __init__(self, header, body=None, task='celery.chord', + args=(), kwargs={}, **options): + Signature.__init__( + self, task, args, + dict(kwargs, header=_maybe_group(header), + body=maybe_signature(body, app=self._app)), **options + ) + self.subtask_type = 'chord' + + def apply(self, args=(), kwargs={}, **options): + # For callbacks: extra args are prepended to the stored args. + args, kwargs, options = self._merge(args, kwargs, options) + return self.type.apply(args, kwargs, **options) + + def freeze(self, _id=None, group_id=None, chord=None): + return self.body.freeze(_id, group_id=group_id, chord=chord) + + @classmethod + def from_dict(self, d, app=None): + args, d['kwargs'] = self._unpack_args(**kwdict(d['kwargs'])) + return _upgrade(d, self(*args, app=app, **kwdict(d))) + + @staticmethod + def _unpack_args(header=None, body=None, **kwargs): + # Python signatures are better at extracting keys from dicts + # than manually popping things off. 
+ return (header, body), kwargs + + @property + def app(self): + # we will be able to fix this mess in 3.2 when we no longer + # require an actual task implementation for chord/group + if self._app: + return self._app + app = None if self.body is None else self.body.app + if app is None: + try: + app = self.tasks[0].app + except IndexError: + app = None + return app if app is not None else current_app + + @property + def type(self): + if self._type: + return self._type + return self.app.tasks['celery.chord'] + + def delay(self, *partial_args, **partial_kwargs): + # There's no partial_kwargs for chord. + return self.apply_async(partial_args) + + def apply_async(self, args=(), kwargs={}, task_id=None, + producer=None, publisher=None, connection=None, + router=None, result_cls=None, **options): + args = (tuple(args) + tuple(self.args) + if args and not self.immutable else self.args) + body = kwargs.get('body') or self.kwargs['body'] + kwargs = dict(self.kwargs, **kwargs) + body = body.clone(**options) + + _chord = self.type + if _chord.app.conf.CELERY_ALWAYS_EAGER: + return self.apply(args, kwargs, task_id=task_id, **options) + res = body.freeze(task_id) + parent = _chord(self.tasks, body, args, **options) + res.parent = parent + return res + + def __call__(self, body=None, **options): + return self.apply_async( + (), {'body': body} if body else {}, **options) + + def clone(self, *args, **kwargs): + s = Signature.clone(self, *args, **kwargs) + # need to make copy of body + try: + s.kwargs['body'] = s.kwargs['body'].clone() + except (AttributeError, KeyError): + pass + return s + + def link(self, callback): + self.body.link(callback) + return callback + + def link_error(self, errback): + self.body.link_error(errback) + return errback + + def set_immutable(self, immutable): + # changes mutability of header only, not callback. + for task in self.tasks: + task.set_immutable(immutable) + + def __repr__(self): + if self.body: + return self.body.reprcall(self.tasks) + return ''.format(self) + + tasks = _getitem_property('kwargs.header') + body = _getitem_property('kwargs.body') + + +def signature(varies, args=(), kwargs={}, options={}, app=None, **kw): + if isinstance(varies, dict): + if isinstance(varies, Signature): + return varies.clone(app=app) + return Signature.from_dict(varies, app=app) + return Signature(varies, args, kwargs, options, app=app, **kw) +subtask = signature # XXX compat + + +def maybe_signature(d, app=None): + if d is not None: + if isinstance(d, dict): + if not isinstance(d, Signature): + return signature(d, app=app) + elif isinstance(d, list): + return [maybe_signature(s, app=app) for s in d] + if app is not None: + d._app = app + return d +maybe_subtask = maybe_signature # XXX compat diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/__init__.py new file mode 100644 index 0000000..c58fdbc --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/concurrency/__init__.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +""" + celery.concurrency + ~~~~~~~~~~~~~~~~~~ + + Pool implementation abstract factory, and alias definitions. + +""" +from __future__ import absolute_import + +# Import from kombu directly as it's used +# early in the import stage, where celery.utils loads +# too much (e.g. 
for eventlet patching) +from kombu.utils import symbol_by_name + +__all__ = ['get_implementation'] + +ALIASES = { + 'prefork': 'celery.concurrency.prefork:TaskPool', + 'eventlet': 'celery.concurrency.eventlet:TaskPool', + 'gevent': 'celery.concurrency.gevent:TaskPool', + 'threads': 'celery.concurrency.threads:TaskPool', + 'solo': 'celery.concurrency.solo:TaskPool', + 'processes': 'celery.concurrency.prefork:TaskPool', # XXX compat alias +} + + +def get_implementation(cls): + return symbol_by_name(cls, ALIASES) diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/asynpool.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/asynpool.py new file mode 100644 index 0000000..bc29d9c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/concurrency/asynpool.py @@ -0,0 +1,1270 @@ +# -*- coding: utf-8 -*- +""" + celery.concurrency.asynpool + ~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + .. note:: + + This module will be moved soon, so don't use it directly. + + Non-blocking version of :class:`multiprocessing.Pool`. + + This code deals with three major challenges: + + 1) Starting up child processes and keeping them running. + 2) Sending jobs to the processes and receiving results back. + 3) Safely shutting down this system. + +""" +from __future__ import absolute_import + +import errno +import gc +import os +import select +import socket +import struct +import sys +import time + +from collections import deque, namedtuple +from io import BytesIO +from pickle import HIGHEST_PROTOCOL +from time import sleep +from weakref import WeakValueDictionary, ref + +from amqp.utils import promise +from billiard.pool import RUN, TERMINATE, ACK, NACK, WorkersJoined +from billiard import pool as _pool +from billiard.compat import buf_t, setblocking, isblocking +from billiard.einfo import ExceptionInfo +from billiard.queues import _SimpleQueue +from kombu.async import READ, WRITE, ERR +from kombu.serialization import pickle as _pickle +from kombu.utils import fxrange +from kombu.utils.compat import get_errno +from kombu.utils.eventio import SELECT_BAD_FD +from celery.five import Counter, items, string_t, text_t, values +from celery.utils.log import get_logger +from celery.utils.text import truncate +from celery.worker import state as worker_state + +try: + from _billiard import read as __read__ + from struct import unpack_from as _unpack_from + memoryview = memoryview + readcanbuf = True + + if sys.version_info[0] == 2 and sys.version_info < (2, 7, 6): + + def unpack_from(fmt, view, _unpack_from=_unpack_from): # noqa + return _unpack_from(fmt, view.tobytes()) # <- memoryview + else: + # unpack_from supports memoryview in 2.7.6 and 3.3+ + unpack_from = _unpack_from # noqa + +except (ImportError, NameError): # pragma: no cover + + def __read__(fd, buf, size, read=os.read): # noqa + chunk = read(fd, size) + n = len(chunk) + if n != 0: + buf.write(chunk) + return n + readcanbuf = False # noqa + + def unpack_from(fmt, iobuf, unpack=struct.unpack): # noqa + return unpack(fmt, iobuf.getvalue()) # <-- BytesIO + + +logger = get_logger(__name__) +error, debug = logger.error, logger.debug + +UNAVAIL = frozenset([errno.EAGAIN, errno.EINTR]) + +#: Constant sent by child process when started (ready to accept work) +WORKER_UP = 15 + +#: A process must have started before this timeout (in secs.) expires. 
+PROC_ALIVE_TIMEOUT = 4.0 + +SCHED_STRATEGY_PREFETCH = 1 +SCHED_STRATEGY_FAIR = 4 + +SCHED_STRATEGIES = { + None: SCHED_STRATEGY_PREFETCH, + 'fair': SCHED_STRATEGY_FAIR, +} + +RESULT_MAXLEN = 128 + +Ack = namedtuple('Ack', ('id', 'fd', 'payload')) + + +def gen_not_started(gen): + # gi_frame is None when generator stopped. + return gen.gi_frame and gen.gi_frame.f_lasti == -1 + + +def _get_job_writer(job): + try: + writer = job._writer + except AttributeError: + pass + else: + return writer() # is a weakref + + +def _select(readers=None, writers=None, err=None, timeout=0): + """Simple wrapper to :class:`~select.select`. + + :param readers: Set of reader fds to test if readable. + :param writers: Set of writer fds to test if writable. + :param err: Set of fds to test for error condition. + + All fd sets passed must be mutable as this function + will remove non-working fds from them, this also means + the caller must make sure there are still fds in the sets + before calling us again. + + :returns: tuple of ``(readable, writable, again)``, where + ``readable`` is a set of fds that have data available for read, + ``writable`` is a set of fds that is ready to be written to + and ``again`` is a flag that if set means the caller must + throw away the result and call us again. + + """ + readers = set() if readers is None else readers + writers = set() if writers is None else writers + err = set() if err is None else err + try: + r, w, e = select.select(readers, writers, err, timeout) + if e: + r = list(set(r) | set(e)) + return r, w, 0 + except (select.error, socket.error) as exc: + if get_errno(exc) == errno.EINTR: + return [], [], 1 + elif get_errno(exc) in SELECT_BAD_FD: + for fd in readers | writers | err: + try: + select.select([fd], [], [], 0) + except (select.error, socket.error) as exc: + if get_errno(exc) not in SELECT_BAD_FD: + raise + readers.discard(fd) + writers.discard(fd) + err.discard(fd) + return [], [], 1 + else: + raise + + +def _repr_result(obj): + try: + return repr(obj) + except Exception as orig_exc: + try: + return text_t(obj) + except UnicodeDecodeError: + if isinstance(obj, string_t): + try: + return obj.decode('utf-8', errors='replace') + except Exception: + pass + return ''.format( + orig_exc, + ) + + +class Worker(_pool.Worker): + """Pool worker process.""" + dead = False + + def on_loop_start(self, pid): + # our version sends a WORKER_UP message when the process is ready + # to accept work, this will tell the parent that the inqueue fd + # is writable. 
+ self.outq.put((WORKER_UP, (pid, ))) + + def prepare_result(self, result, maxlen=RESULT_MAXLEN, truncate=truncate): + if not isinstance(result, ExceptionInfo): + return truncate(_repr_result(result), maxlen) + return result + + +class ResultHandler(_pool.ResultHandler): + """Handles messages from the pool processes.""" + + def __init__(self, *args, **kwargs): + self.fileno_to_outq = kwargs.pop('fileno_to_outq') + self.on_process_alive = kwargs.pop('on_process_alive') + super(ResultHandler, self).__init__(*args, **kwargs) + # add our custom message handler + self.state_handlers[WORKER_UP] = self.on_process_alive + + def _recv_message(self, add_reader, fd, callback, + __read__=__read__, readcanbuf=readcanbuf, + BytesIO=BytesIO, unpack_from=unpack_from, + load=_pickle.load): + Hr = Br = 0 + if readcanbuf: + buf = bytearray(4) + bufv = memoryview(buf) + else: + buf = bufv = BytesIO() + # header + + while Hr < 4: + try: + n = __read__( + fd, bufv[Hr:] if readcanbuf else bufv, 4 - Hr, + ) + except OSError as exc: + if get_errno(exc) not in UNAVAIL: + raise + yield + else: + if n == 0: + raise (OSError('End of file during message') if Hr + else EOFError()) + Hr += n + + body_size, = unpack_from('>i', bufv) + if readcanbuf: + buf = bytearray(body_size) + bufv = memoryview(buf) + else: + buf = bufv = BytesIO() + + while Br < body_size: + try: + n = __read__( + fd, bufv[Br:] if readcanbuf else bufv, body_size - Br, + ) + except OSError as exc: + if get_errno(exc) not in UNAVAIL: + raise + yield + else: + if n == 0: + raise (OSError('End of file during message') if Br + else EOFError()) + Br += n + add_reader(fd, self.handle_event, fd) + if readcanbuf: + message = load(BytesIO(bufv)) + else: + bufv.seek(0) + message = load(bufv) + if message: + callback(message) + + def _make_process_result(self, hub): + """Coroutine that reads messages from the pool processes + and calls the appropriate handler.""" + fileno_to_outq = self.fileno_to_outq + on_state_change = self.on_state_change + add_reader = hub.add_reader + remove_reader = hub.remove_reader + recv_message = self._recv_message + + def on_result_readable(fileno): + try: + fileno_to_outq[fileno] + except KeyError: # process gone + return remove_reader(fileno) + it = recv_message(add_reader, fileno, on_state_change) + try: + next(it) + except StopIteration: + pass + except (IOError, OSError, EOFError): + remove_reader(fileno) + else: + add_reader(fileno, it) + return on_result_readable + + def register_with_event_loop(self, hub): + self.handle_event = self._make_process_result(hub) + + def handle_event(self, fileno): + raise RuntimeError('Not registered with event loop') + + def on_stop_not_started(self): + """This method is always used to stop when the helper thread is not + started.""" + cache = self.cache + check_timeouts = self.check_timeouts + fileno_to_outq = self.fileno_to_outq + on_state_change = self.on_state_change + join_exited_workers = self.join_exited_workers + + # flush the processes outqueues until they have all terminated. + outqueues = set(fileno_to_outq) + while cache and outqueues and self._state != TERMINATE: + if check_timeouts is not None: + # make sure tasks with a time limit will time out. 
+ check_timeouts() + # cannot iterate and remove at the same time + pending_remove_fd = set() + for fd in outqueues: + self._flush_outqueue( + fd, pending_remove_fd.discard, fileno_to_outq, + on_state_change, + ) + try: + join_exited_workers(shutdown=True) + except WorkersJoined: + return debug('result handler: all workers terminated') + outqueues.difference_update(pending_remove_fd) + + def _flush_outqueue(self, fd, remove, process_index, on_state_change): + try: + proc = process_index[fd] + except KeyError: + # process already found terminated + # which means its outqueue has already been processed + # by the worker lost handler. + return remove(fd) + + reader = proc.outq._reader + try: + setblocking(reader, 1) + except (OSError, IOError): + return remove(fd) + try: + if reader.poll(0): + task = reader.recv() + else: + task = None + sleep(0.5) + except (IOError, EOFError): + return remove(fd) + else: + if task: + on_state_change(task) + finally: + try: + setblocking(reader, 0) + except (OSError, IOError): + return remove(fd) + + +class AsynPool(_pool.Pool): + """Pool version that uses AIO instead of helper threads.""" + ResultHandler = ResultHandler + Worker = Worker + + def __init__(self, processes=None, synack=False, + sched_strategy=None, *args, **kwargs): + self.sched_strategy = SCHED_STRATEGIES.get(sched_strategy, + sched_strategy) + processes = self.cpu_count() if processes is None else processes + self.synack = synack + # create queue-pairs for all our processes in advance. + self._queues = dict((self.create_process_queues(), None) + for _ in range(processes)) + + # inqueue fileno -> process mapping + self._fileno_to_inq = {} + # outqueue fileno -> process mapping + self._fileno_to_outq = {} + # synqueue fileno -> process mapping + self._fileno_to_synq = {} + + # We keep track of processes that have not yet + # sent a WORKER_UP message. If a process fails to send + # this message within proc_up_timeout we terminate it + # and hope the next process will recover. + self._proc_alive_timeout = PROC_ALIVE_TIMEOUT + self._waiting_to_start = set() + + # denormalized set of all inqueues. + self._all_inqueues = set() + + # Set of fds being written to (busy) + self._active_writes = set() + + # Set of active co-routines currently writing jobs. + self._active_writers = set() + + # Set of fds that are busy (executing task) + self._busy_workers = set() + self._mark_worker_as_available = self._busy_workers.discard + + # Holds jobs waiting to be written to child processes. + self.outbound_buffer = deque() + + self.write_stats = Counter() + + super(AsynPool, self).__init__(processes, *args, **kwargs) + + for proc in self._pool: + # create initial mappings, these will be updated + # as processes are recycled, or found lost elsewhere. + self._fileno_to_outq[proc.outqR_fd] = proc + self._fileno_to_synq[proc.synqW_fd] = proc + self.on_soft_timeout = self.on_hard_timeout = None + if self._timeout_handler: + self.on_soft_timeout = self._timeout_handler.on_soft_timeout + self.on_hard_timeout = self._timeout_handler.on_hard_timeout + + def _create_worker_process(self, i): + gc.collect() # Issue #2927 + return super(AsynPool, self)._create_worker_process(i) + + def _event_process_exit(self, hub, proc): + # This method is called whenever the process sentinel is readable. 
+ self._untrack_child_process(proc, hub) + self.maintain_pool() + + def _track_child_process(self, proc, hub): + try: + fd = proc._sentinel_poll + except AttributeError: + # we need to duplicate the fd here to carefully + # control when the fd is removed from the process table, + # as once the original fd is closed we cannot unregister + # the fd from epoll(7) anymore, causing a 100% CPU poll loop. + fd = proc._sentinel_poll = os.dup(proc._popen.sentinel) + hub.add_reader(fd, self._event_process_exit, hub, proc) + + def _untrack_child_process(self, proc, hub): + if proc._sentinel_poll is not None: + fd, proc._sentinel_poll = proc._sentinel_poll, None + hub.remove(fd) + os.close(fd) + + def register_with_event_loop(self, hub): + """Registers the async pool with the current event loop.""" + self._result_handler.register_with_event_loop(hub) + self.handle_result_event = self._result_handler.handle_event + self._create_timelimit_handlers(hub) + self._create_process_handlers(hub) + self._create_write_handlers(hub) + + # Add handler for when a process exits (calls maintain_pool) + [self._track_child_process(w, hub) for w in self._pool] + # Handle_result_event is called whenever one of the + # result queues are readable. + [hub.add_reader(fd, self.handle_result_event, fd) + for fd in self._fileno_to_outq] + + # Timers include calling maintain_pool at a regular interval + # to be certain processes are restarted. + for handler, interval in items(self.timers): + hub.call_repeatedly(interval, handler) + + hub.on_tick.add(self.on_poll_start) + + def _create_timelimit_handlers(self, hub, now=time.time): + """For async pool this sets up the handlers used + to implement time limits.""" + call_later = hub.call_later + trefs = self._tref_for_id = WeakValueDictionary() + + def on_timeout_set(R, soft, hard): + if soft: + trefs[R._job] = call_later( + soft, self._on_soft_timeout, R._job, soft, hard, hub, + ) + elif hard: + trefs[R._job] = call_later( + hard, self._on_hard_timeout, R._job, + ) + self.on_timeout_set = on_timeout_set + + def _discard_tref(job): + try: + tref = trefs.pop(job) + tref.cancel() + del(tref) + except (KeyError, AttributeError): + pass # out of scope + self._discard_tref = _discard_tref + + def on_timeout_cancel(R): + _discard_tref(R._job) + self.on_timeout_cancel = on_timeout_cancel + + def _on_soft_timeout(self, job, soft, hard, hub, now=time.time): + # only used by async pool. + if hard: + self._tref_for_id[job] = hub.call_at( + now() + (hard - soft), self._on_hard_timeout, job, + ) + try: + result = self._cache[job] + except KeyError: + pass # job ready + else: + self.on_soft_timeout(result) + finally: + if not hard: + # remove tref + self._discard_tref(job) + + def _on_hard_timeout(self, job): + # only used by async pool. 
+ try: + result = self._cache[job] + except KeyError: + pass # job ready + else: + self.on_hard_timeout(result) + finally: + # remove tref + self._discard_tref(job) + + def on_job_ready(self, job, i, obj, inqW_fd): + self._mark_worker_as_available(inqW_fd) + + def _create_process_handlers(self, hub, READ=READ, ERR=ERR): + """For async pool this will create the handlers called + when a process is up/down and etc.""" + add_reader, remove_reader, remove_writer = ( + hub.add_reader, hub.remove_reader, hub.remove_writer, + ) + cache = self._cache + all_inqueues = self._all_inqueues + fileno_to_inq = self._fileno_to_inq + fileno_to_outq = self._fileno_to_outq + fileno_to_synq = self._fileno_to_synq + busy_workers = self._busy_workers + handle_result_event = self.handle_result_event + process_flush_queues = self.process_flush_queues + waiting_to_start = self._waiting_to_start + + def verify_process_alive(proc): + proc = proc() # is a weakref + if (proc is not None and proc._is_alive() and + proc in waiting_to_start): + assert proc.outqR_fd in fileno_to_outq + assert fileno_to_outq[proc.outqR_fd] is proc + assert proc.outqR_fd in hub.readers + error('Timed out waiting for UP message from %r', proc) + os.kill(proc.pid, 9) + + def on_process_up(proc): + """Called when a process has started.""" + # If we got the same fd as a previous process then we will also + # receive jobs in the old buffer, so we need to reset the + # job._write_to and job._scheduled_for attributes used to recover + # message boundaries when processes exit. + infd = proc.inqW_fd + for job in values(cache): + if job._write_to and job._write_to.inqW_fd == infd: + job._write_to = proc + if job._scheduled_for and job._scheduled_for.inqW_fd == infd: + job._scheduled_for = proc + fileno_to_outq[proc.outqR_fd] = proc + + # maintain_pool is called whenever a process exits. + self._track_child_process(proc, hub) + + assert not isblocking(proc.outq._reader) + + # handle_result_event is called when the processes outqueue is + # readable. + add_reader(proc.outqR_fd, handle_result_event, proc.outqR_fd) + + waiting_to_start.add(proc) + hub.call_later( + self._proc_alive_timeout, verify_process_alive, ref(proc), + ) + + self.on_process_up = on_process_up + + def _remove_from_index(obj, proc, index, remove_fun, callback=None): + # this remove the file descriptors for a process from + # the indices. we have to make sure we don't overwrite + # another processes fds, as the fds may be reused. + try: + fd = obj.fileno() + except (IOError, OSError): + return + + try: + if index[fd] is proc: + # fd has not been reused so we can remove it from index. 
+ index.pop(fd, None) + except KeyError: + pass + else: + remove_fun(fd) + if callback is not None: + callback(fd) + return fd + + def on_process_down(proc): + """Called when a worker process exits.""" + if getattr(proc, 'dead', None): + return + process_flush_queues(proc) + _remove_from_index( + proc.outq._reader, proc, fileno_to_outq, remove_reader, + ) + if proc.synq: + _remove_from_index( + proc.synq._writer, proc, fileno_to_synq, remove_writer, + ) + inq = _remove_from_index( + proc.inq._writer, proc, fileno_to_inq, remove_writer, + callback=all_inqueues.discard, + ) + if inq: + busy_workers.discard(inq) + self._untrack_child_process(proc, hub) + waiting_to_start.discard(proc) + self._active_writes.discard(proc.inqW_fd) + remove_writer(proc.inq._writer) + remove_reader(proc.outq._reader) + if proc.synqR_fd: + remove_reader(proc.synq._reader) + if proc.synqW_fd: + self._active_writes.discard(proc.synqW_fd) + remove_reader(proc.synq._writer) + self.on_process_down = on_process_down + + def _create_write_handlers(self, hub, + pack=struct.pack, dumps=_pickle.dumps, + protocol=HIGHEST_PROTOCOL): + """For async pool this creates the handlers used to write data to + child processes.""" + fileno_to_inq = self._fileno_to_inq + fileno_to_synq = self._fileno_to_synq + outbound = self.outbound_buffer + pop_message = outbound.popleft + append_message = outbound.append + put_back_message = outbound.appendleft + all_inqueues = self._all_inqueues + active_writes = self._active_writes + active_writers = self._active_writers + busy_workers = self._busy_workers + diff = all_inqueues.difference + add_writer = hub.add_writer + hub_add, hub_remove = hub.add, hub.remove + mark_write_fd_as_active = active_writes.add + mark_write_gen_as_active = active_writers.add + mark_worker_as_busy = busy_workers.add + write_generator_done = active_writers.discard + get_job = self._cache.__getitem__ + write_stats = self.write_stats + is_fair_strategy = self.sched_strategy == SCHED_STRATEGY_FAIR + revoked_tasks = worker_state.revoked + getpid = os.getpid + + precalc = {ACK: self._create_payload(ACK, (0, )), + NACK: self._create_payload(NACK, (0, ))} + + def _put_back(job, _time=time.time): + # puts back at the end of the queue + if job._terminated is not None or \ + job.correlation_id in revoked_tasks: + if not job._accepted: + job._ack(None, _time(), getpid(), None) + job._set_terminated(job._terminated) + else: + # XXX linear lookup, should find a better way, + # but this happens rarely and is here to protect against races. + if job not in outbound: + outbound.appendleft(job) + self._put_back = _put_back + + # called for every event loop iteration, and if there + # are messages pending this will schedule writing one message + # by registering the 'schedule_writes' function for all currently + # inactive inqueues (not already being written to) + + # consolidate means the event loop will merge them + # and call the callback once with the list writable fds as + # argument. Using this means we minimize the risk of having + # the same fd receive every task if the pipe read buffer is not + # full. 
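+ # With the fair scheduling strategy writes are only scheduled while
+ # some workers are not yet busy with a task; the default strategy
+ # schedules a write to any inqueue that is not already being
+ # written to.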
+ if is_fair_strategy: + + def on_poll_start(): + if outbound and len(busy_workers) < len(all_inqueues): + inactive = diff(active_writes) + [hub_add(fd, None, WRITE | ERR, consolidate=True) + for fd in inactive] + else: + [hub_remove(fd) for fd in diff(active_writes)] + else: + def on_poll_start(): # noqa + if outbound: + [hub_add(fd, None, WRITE | ERR, consolidate=True) + for fd in diff(active_writes)] + else: + [hub_remove(fd) for fd in diff(active_writes)] + self.on_poll_start = on_poll_start + + def on_inqueue_close(fd, proc): + # Makes sure the fd is removed from tracking when + # the connection is closed, this is essential as fds may be reused. + busy_workers.discard(fd) + try: + if fileno_to_inq[fd] is proc: + fileno_to_inq.pop(fd, None) + active_writes.discard(fd) + all_inqueues.discard(fd) + hub_remove(fd) + except KeyError: + pass + self.on_inqueue_close = on_inqueue_close + + def schedule_writes(ready_fds, curindex=[0]): + # Schedule write operation to ready file descriptor. + # The file descriptor is writeable, but that does not + # mean the process is currently reading from the socket. + # The socket is buffered so writeable simply means that + # the buffer can accept at least 1 byte of data. + + # This means we have to cycle between the ready fds. + # the first version used shuffle, but using i % total + # is about 30% faster with many processes. The latter + # also shows more fairness in write stats when used with + # many processes [XXX On OS X, this may vary depending + # on event loop implementation (i.e select vs epoll), so + # have to test further] + total = len(ready_fds) + + for i in range(total): + ready_fd = ready_fds[curindex[0] % total] + if ready_fd in active_writes: + # already writing to this fd + curindex[0] += 1 + continue + if is_fair_strategy and ready_fd in busy_workers: + # worker is already busy with another task + curindex[0] += 1 + continue + if ready_fd not in all_inqueues: + hub_remove(ready_fd) + curindex[0] += 1 + continue + try: + job = pop_message() + except IndexError: + # no more messages, remove all inactive fds from the hub. + # this is important since the fds are always writeable + # as long as there's 1 byte left in the buffer, and so + # this may create a spinloop where the event loop + # always wakes up. + for inqfd in diff(active_writes): + hub_remove(inqfd) + break + else: + if not job._accepted: # job not accepted by another worker + try: + # keep track of what process the write operation + # was scheduled for. + proc = job._scheduled_for = fileno_to_inq[ready_fd] + except KeyError: + # write was scheduled for this fd but the process + # has since exited and the message must be sent to + # another process. + put_back_message(job) + curindex[0] += 1 + continue + cor = _write_job(proc, ready_fd, job) + job._writer = ref(cor) + mark_write_gen_as_active(cor) + mark_write_fd_as_active(ready_fd) + mark_worker_as_busy(ready_fd) + + # Try to write immediately, in case there's an error. + try: + next(cor) + except StopIteration: + pass + except OSError as exc: + if get_errno(exc) != errno.EBADF: + raise + else: + add_writer(ready_fd, cor) + curindex[0] += 1 + hub.consolidate_callback = schedule_writes + + def send_job(tup): + # Schedule writing job request for when one of the process + # inqueues are writable. + body = dumps(tup, protocol=protocol) + body_size = len(body) + header = pack('>I', body_size) + # index 1,0 is the job ID. 
+ job = get_job(tup[1][0]) + job._payload = buf_t(header), buf_t(body), body_size + append_message(job) + self._quick_put = send_job + + def on_not_recovering(proc, fd, job, exc): + error('Process inqueue damaged: %r %r: %r', + proc, proc.exitcode, exc, exc_info=1) + if proc._is_alive(): + proc.terminate() + hub.remove(fd) + self._put_back(job) + + def _write_job(proc, fd, job): + # writes job to the worker process. + # Operation must complete if more than one byte of data + # was written. If the broker connection is lost + # and no data was written the operation shall be canceled. + header, body, body_size = job._payload + errors = 0 + try: + # job result keeps track of what process the job is sent to. + job._write_to = proc + send = proc.send_job_offset + + Hw = Bw = 0 + # write header + while Hw < 4: + try: + Hw += send(header, Hw) + except Exception as exc: + if get_errno(exc) not in UNAVAIL: + raise + # suspend until more data + errors += 1 + if errors > 100: + on_not_recovering(proc, fd, job, exc) + raise StopIteration() + yield + else: + errors = 0 + + # write body + while Bw < body_size: + try: + Bw += send(body, Bw) + except Exception as exc: + if get_errno(exc) not in UNAVAIL: + raise + # suspend until more data + errors += 1 + if errors > 100: + on_not_recovering(proc, fd, job, exc) + raise StopIteration() + yield + else: + errors = 0 + finally: + hub_remove(fd) + write_stats[proc.index] += 1 + # message written, so this fd is now available + active_writes.discard(fd) + write_generator_done(job._writer()) # is a weakref + + def send_ack(response, pid, job, fd, WRITE=WRITE, ERR=ERR): + # Only used when synack is enabled. + # Schedule writing ack response for when the fd is writeable. + msg = Ack(job, fd, precalc[response]) + callback = promise(write_generator_done) + cor = _write_ack(fd, msg, callback=callback) + mark_write_gen_as_active(cor) + mark_write_fd_as_active(fd) + callback.args = (cor, ) + add_writer(fd, cor) + self.send_ack = send_ack + + def _write_ack(fd, ack, callback=None): + # writes ack back to the worker if synack enabled. + # this operation *MUST* complete, otherwise + # the worker process will hang waiting for the ack. + header, body, body_size = ack[2] + try: + try: + proc = fileno_to_synq[fd] + except KeyError: + # process died, we can safely discard the ack at this + # point. + raise StopIteration() + send = proc.send_syn_offset + + Hw = Bw = 0 + # write header + while Hw < 4: + try: + Hw += send(header, Hw) + except Exception as exc: + if get_errno(exc) not in UNAVAIL: + raise + yield + + # write body + while Bw < body_size: + try: + Bw += send(body, Bw) + except Exception as exc: + if get_errno(exc) not in UNAVAIL: + raise + # suspend until more data + yield + finally: + if callback: + callback() + # message written, so this fd is now available + active_writes.discard(fd) + + def flush(self): + if self._state == TERMINATE: + return + # cancel all tasks that have not been accepted so that NACK is sent. + for job in values(self._cache): + if not job._accepted: + job._cancel() + + # clear the outgoing buffer as the tasks will be redelivered by + # the broker anyway. + if self.outbound_buffer: + self.outbound_buffer.clear() + + self.maintain_pool() + + try: + # ...but we must continue writing the payloads we already started + # to keep message boundaries. + # The messages may be NACK'ed later if synack is enabled. 
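+ # Write generators that have not produced any bytes yet are simply
+ # discarded together with their cache entry; generators that already
+ # started are driven to completion via _flush_writer() so that the
+ # length-prefixed message boundaries stay intact.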
+ if self._state == RUN: + # flush outgoing buffers + intervals = fxrange(0.01, 0.1, 0.01, repeatlast=True) + owned_by = {} + for job in values(self._cache): + writer = _get_job_writer(job) + if writer is not None: + owned_by[writer] = job + + while self._active_writers: + writers = list(self._active_writers) + for gen in writers: + if (gen.__name__ == '_write_job' and + gen_not_started(gen)): + # has not started writing the job so can + # discard the task, but we must also remove + # it from the Pool._cache. + try: + job = owned_by[gen] + except KeyError: + pass + else: + # removes from Pool._cache + job.discard() + self._active_writers.discard(gen) + else: + try: + job = owned_by[gen] + except KeyError: + pass + else: + job_proc = job._write_to + if job_proc._is_alive(): + self._flush_writer(job_proc, gen) + # workers may have exited in the meantime. + self.maintain_pool() + sleep(next(intervals)) # don't busyloop + finally: + self.outbound_buffer.clear() + self._active_writers.clear() + self._active_writes.clear() + self._busy_workers.clear() + + def _flush_writer(self, proc, writer): + fds = set([proc.inq._writer]) + try: + while fds: + if not proc._is_alive(): + break # process exited + readable, writable, again = _select( + writers=fds, err=fds, timeout=0.5, + ) + if not again and (writable or readable): + try: + next(writer) + except (StopIteration, OSError, IOError, EOFError): + break + finally: + self._active_writers.discard(writer) + + def get_process_queues(self): + """Get queues for a new process. + + Here we will find an unused slot, as there should always + be one available when we start a new process. + """ + return next(q for q, owner in items(self._queues) + if owner is None) + + def on_grow(self, n): + """Grow the pool by ``n`` proceses.""" + diff = max(self._processes - len(self._queues), 0) + if diff: + self._queues.update( + dict((self.create_process_queues(), None) for _ in range(diff)) + ) + + def on_shrink(self, n): + """Shrink the pool by ``n`` processes.""" + pass + + def create_process_queues(self): + """Creates new in, out (and optionally syn) queues, + returned as a tuple.""" + # NOTE: Pipes must be set O_NONBLOCK at creation time (the original + # fd), otherwise it will not be possible to change the flags until + # there is an actual reader/writer on the other side. 
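+ # Only the ends used by the parent are created non-blocking: the
+ # writers of the in- and syn-queues and the reader of the outqueue.
+ # The child keeps blocking semantics on its ends, as the asserts
+ # below verify.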
+ inq = _SimpleQueue(wnonblock=True) + outq = _SimpleQueue(rnonblock=True) + synq = None + assert isblocking(inq._reader) + assert not isblocking(inq._writer) + assert not isblocking(outq._reader) + assert isblocking(outq._writer) + if self.synack: + synq = _SimpleQueue(wnonblock=True) + assert isblocking(synq._reader) + assert not isblocking(synq._writer) + return inq, outq, synq + + def on_process_alive(self, pid): + """Handler called when the :const:`WORKER_UP` message is received + from a child process, which marks the process as ready + to receive work.""" + try: + proc = next(w for w in self._pool if w.pid == pid) + except StopIteration: + return logger.warning('process with pid=%s already exited', pid) + assert proc.inqW_fd not in self._fileno_to_inq + assert proc.inqW_fd not in self._all_inqueues + self._waiting_to_start.discard(proc) + self._fileno_to_inq[proc.inqW_fd] = proc + self._fileno_to_synq[proc.synqW_fd] = proc + self._all_inqueues.add(proc.inqW_fd) + + def on_job_process_down(self, job, pid_gone): + """Handler called for each job when the process it was assigned to + exits.""" + if job._write_to and not job._write_to._is_alive(): + # job was partially written + self.on_partial_read(job, job._write_to) + elif job._scheduled_for and not job._scheduled_for._is_alive(): + # job was only scheduled to be written to this process, + # but no data was sent so put it back on the outbound_buffer. + self._put_back(job) + + def on_job_process_lost(self, job, pid, exitcode): + """Handler called for each *started* job when the process it + was assigned to exited by mysterious means (error exitcodes and + signals)""" + self.mark_as_worker_lost(job, exitcode) + + def human_write_stats(self): + if self.write_stats is None: + return 'N/A' + vals = list(values(self.write_stats)) + total = sum(vals) + + def per(v, total): + return '{0:.2f}%'.format((float(v) / total) * 100.0 if v else 0) + + return { + 'total': total, + 'avg': per(total / len(self.write_stats) if total else 0, total), + 'all': ', '.join(per(v, total) for v in vals), + 'raw': ', '.join(map(str, vals)), + 'inqueues': { + 'total': len(self._all_inqueues), + 'active': len(self._active_writes), + } + } + + def _process_cleanup_queues(self, proc): + """Handler called to clean up a processes queues after process + exit.""" + if not proc.dead: + try: + self._queues[self._find_worker_queues(proc)] = None + except (KeyError, ValueError): + pass + + @staticmethod + def _stop_task_handler(task_handler): + """Called at shutdown to tell processes that we are shutting down.""" + for proc in task_handler.pool: + try: + setblocking(proc.inq._writer, 1) + except (OSError, IOError): + pass + else: + try: + proc.inq.put(None) + except OSError as exc: + if get_errno(exc) != errno.EBADF: + raise + + def create_result_handler(self): + return super(AsynPool, self).create_result_handler( + fileno_to_outq=self._fileno_to_outq, + on_process_alive=self.on_process_alive, + ) + + def _process_register_queues(self, proc, queues): + """Marks new ownership for ``queues`` so that the fileno indices are + updated.""" + assert queues in self._queues + b = len(self._queues) + self._queues[queues] = proc + assert b == len(self._queues) + + def _find_worker_queues(self, proc): + """Find the queues owned by ``proc``.""" + try: + return next(q for q, owner in items(self._queues) + if owner == proc) + except StopIteration: + raise ValueError(proc) + + def _setup_queues(self): + # this is only used by the original pool which uses a shared + # queue for all processes. 
+ + # these attributes makes no sense for us, but we will still + # have to initialize them. + self._inqueue = self._outqueue = \ + self._quick_put = self._quick_get = self._poll_result = None + + def process_flush_queues(self, proc): + """Flushes all queues, including the outbound buffer, so that + all tasks that have not been started will be discarded. + + In Celery this is called whenever the transport connection is lost + (consumer restart). + + """ + resq = proc.outq._reader + on_state_change = self._result_handler.on_state_change + fds = set([resq]) + while fds and not resq.closed and self._state != TERMINATE: + readable, _, again = _select(fds, None, fds, timeout=0.01) + if readable: + try: + task = resq.recv() + except (OSError, IOError, EOFError) as exc: + if get_errno(exc) == errno.EINTR: + continue + elif get_errno(exc) == errno.EAGAIN: + break + else: + debug('got %r while flushing process %r', + exc, proc, exc_info=1) + if get_errno(exc) not in UNAVAIL: + debug('got %r while flushing process %r', + exc, proc, exc_info=1) + break + else: + if task is None: + debug('got sentinel while flushing process %r', proc) + break + else: + on_state_change(task) + else: + break + + def on_partial_read(self, job, proc): + """Called when a job was only partially written to a child process + and it exited.""" + # worker terminated by signal: + # we cannot reuse the sockets again, because we don't know if + # the process wrote/read anything frmo them, and if so we cannot + # restore the message boundaries. + if not job._accepted: + # job was not acked, so find another worker to send it to. + self._put_back(job) + writer = _get_job_writer(job) + if writer: + self._active_writers.discard(writer) + del(writer) + + if not proc.dead: + proc.dead = True + # Replace queues to avoid reuse + before = len(self._queues) + try: + queues = self._find_worker_queues(proc) + if self.destroy_queues(queues, proc): + self._queues[self.create_process_queues()] = None + except ValueError: + pass + assert len(self._queues) == before + + def destroy_queues(self, queues, proc): + """Destroy queues that can no longer be used, so that they + be replaced by new sockets.""" + assert not proc._is_alive() + self._waiting_to_start.discard(proc) + removed = 1 + try: + self._queues.pop(queues) + except KeyError: + removed = 0 + try: + self.on_inqueue_close(queues[0]._writer.fileno(), proc) + except IOError: + pass + for queue in queues: + if queue: + for sock in (queue._reader, queue._writer): + if not sock.closed: + try: + sock.close() + except (IOError, OSError): + pass + return removed + + def _create_payload(self, type_, args, + dumps=_pickle.dumps, pack=struct.pack, + protocol=HIGHEST_PROTOCOL): + body = dumps((type_, args), protocol=protocol) + size = len(body) + header = pack('>I', size) + return header, body, size + + @classmethod + def _set_result_sentinel(cls, _outqueue, _pool): + # unused + pass + + def _help_stuff_finish_args(self): + # Pool._help_stuff_finished is a classmethod so we have to use this + # trick to modify the arguments passed to it. 
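+ # Returning the worker list means the _help_stuff_finish()
+ # classmethod below receives the pool processes and can drain their
+ # inqueues until the task handler has finished.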
+ return (self._pool, ) + + @classmethod + def _help_stuff_finish(cls, pool): + debug( + 'removing tasks from inqueue until task handler finished', + ) + fileno_to_proc = {} + inqR = set() + for w in pool: + try: + fd = w.inq._reader.fileno() + inqR.add(fd) + fileno_to_proc[fd] = w + except IOError: + pass + while inqR: + readable, _, again = _select(inqR, timeout=0.5) + if again: + continue + if not readable: + break + for fd in readable: + fileno_to_proc[fd].inq._reader.recv() + sleep(0) + + @property + def timers(self): + return {self.maintain_pool: 5.0} diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/base.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/base.py new file mode 100644 index 0000000..29c348d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/concurrency/base.py @@ -0,0 +1,171 @@ +# -*- coding: utf-8 -*- +""" + celery.concurrency.base + ~~~~~~~~~~~~~~~~~~~~~~~ + + TaskPool interface. + +""" +from __future__ import absolute_import + +import logging +import os +import sys + +from billiard.einfo import ExceptionInfo +from billiard.exceptions import WorkerLostError +from kombu.utils.encoding import safe_repr + +from celery.exceptions import WorkerShutdown, WorkerTerminate +from celery.five import monotonic, reraise +from celery.utils import timer2 +from celery.utils.text import truncate +from celery.utils.log import get_logger + +__all__ = ['BasePool', 'apply_target'] + +logger = get_logger('celery.pool') + + +def apply_target(target, args=(), kwargs={}, callback=None, + accept_callback=None, pid=None, getpid=os.getpid, + propagate=(), monotonic=monotonic, **_): + if accept_callback: + accept_callback(pid or getpid(), monotonic()) + try: + ret = target(*args, **kwargs) + except propagate: + raise + except Exception: + raise + except (WorkerShutdown, WorkerTerminate): + raise + except BaseException as exc: + try: + reraise(WorkerLostError, WorkerLostError(repr(exc)), + sys.exc_info()[2]) + except WorkerLostError: + callback(ExceptionInfo()) + else: + callback(ret) + + +class BasePool(object): + RUN = 0x1 + CLOSE = 0x2 + TERMINATE = 0x3 + + Timer = timer2.Timer + + #: set to true if the pool can be shutdown from within + #: a signal handler. + signal_safe = True + + #: set to true if pool uses greenlets. 
+ is_green = False + + _state = None + _pool = None + + #: only used by multiprocessing pool + uses_semaphore = False + + task_join_will_block = True + + def __init__(self, limit=None, putlocks=True, + forking_enable=True, callbacks_propagate=(), **options): + self.limit = limit + self.putlocks = putlocks + self.options = options + self.forking_enable = forking_enable + self.callbacks_propagate = callbacks_propagate + self._does_debug = logger.isEnabledFor(logging.DEBUG) + + def on_start(self): + pass + + def did_start_ok(self): + return True + + def flush(self): + pass + + def on_stop(self): + pass + + def register_with_event_loop(self, loop): + pass + + def on_apply(self, *args, **kwargs): + pass + + def on_terminate(self): + pass + + def on_soft_timeout(self, job): + pass + + def on_hard_timeout(self, job): + pass + + def maintain_pool(self, *args, **kwargs): + pass + + def terminate_job(self, pid, signal=None): + raise NotImplementedError( + '{0} does not implement kill_job'.format(type(self))) + + def restart(self): + raise NotImplementedError( + '{0} does not implement restart'.format(type(self))) + + def stop(self): + self.on_stop() + self._state = self.TERMINATE + + def terminate(self): + self._state = self.TERMINATE + self.on_terminate() + + def start(self): + self.on_start() + self._state = self.RUN + + def close(self): + self._state = self.CLOSE + self.on_close() + + def on_close(self): + pass + + def apply_async(self, target, args=[], kwargs={}, **options): + """Equivalent of the :func:`apply` built-in function. + + Callbacks should optimally return as soon as possible since + otherwise the thread which handles the result will get blocked. + + """ + if self._does_debug: + logger.debug('TaskPool: Apply %s (args:%s kwargs:%s)', + target, truncate(safe_repr(args), 1024), + truncate(safe_repr(kwargs), 1024)) + + return self.on_apply(target, args, kwargs, + waitforslot=self.putlocks, + callbacks_propagate=self.callbacks_propagate, + **options) + + def _get_info(self): + return {} + + @property + def info(self): + return self._get_info() + + @property + def active(self): + return self._state == self.RUN + + @property + def num_processes(self): + return self.limit diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/eventlet.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/eventlet.py new file mode 100644 index 0000000..3ae4549 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/concurrency/eventlet.py @@ -0,0 +1,161 @@ +# -*- coding: utf-8 -*- +""" + celery.concurrency.eventlet + ~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Eventlet pool implementation. + +""" +from __future__ import absolute_import + +import sys + +from time import time + +__all__ = ['TaskPool'] + +W_RACE = """\ +Celery module with %s imported before eventlet patched\ +""" +RACE_MODS = ('billiard.', 'celery.', 'kombu.') + + +#: Warn if we couldn't patch early enough, +#: and thread/socket depending celery modules have already been loaded. +for mod in (mod for mod in sys.modules if mod.startswith(RACE_MODS)): + for side in ('thread', 'threading', 'socket'): # pragma: no cover + if getattr(mod, side, None): + import warnings + warnings.warn(RuntimeWarning(W_RACE % side)) + + +from celery import signals # noqa +from celery.utils import timer2 # noqa + +from . 
import base # noqa + + +def apply_target(target, args=(), kwargs={}, callback=None, + accept_callback=None, getpid=None): + return base.apply_target(target, args, kwargs, callback, accept_callback, + pid=getpid()) + + +class Schedule(timer2.Schedule): + + def __init__(self, *args, **kwargs): + from eventlet.greenthread import spawn_after + from greenlet import GreenletExit + super(Schedule, self).__init__(*args, **kwargs) + + self.GreenletExit = GreenletExit + self._spawn_after = spawn_after + self._queue = set() + + def _enter(self, eta, priority, entry): + secs = max(eta - time(), 0) + g = self._spawn_after(secs, entry) + self._queue.add(g) + g.link(self._entry_exit, entry) + g.entry = entry + g.eta = eta + g.priority = priority + g.canceled = False + return g + + def _entry_exit(self, g, entry): + try: + try: + g.wait() + except self.GreenletExit: + entry.cancel() + g.canceled = True + finally: + self._queue.discard(g) + + def clear(self): + queue = self._queue + while queue: + try: + queue.pop().cancel() + except (KeyError, self.GreenletExit): + pass + + @property + def queue(self): + return self._queue + + +class Timer(timer2.Timer): + Schedule = Schedule + + def ensure_started(self): + pass + + def stop(self): + self.schedule.clear() + + def cancel(self, tref): + try: + tref.cancel() + except self.schedule.GreenletExit: + pass + + def start(self): + pass + + +class TaskPool(base.BasePool): + Timer = Timer + + signal_safe = False + is_green = True + task_join_will_block = False + + def __init__(self, *args, **kwargs): + from eventlet import greenthread + from eventlet.greenpool import GreenPool + self.Pool = GreenPool + self.getcurrent = greenthread.getcurrent + self.getpid = lambda: id(greenthread.getcurrent()) + self.spawn_n = greenthread.spawn_n + + super(TaskPool, self).__init__(*args, **kwargs) + + def on_start(self): + self._pool = self.Pool(self.limit) + signals.eventlet_pool_started.send(sender=self) + self._quick_put = self._pool.spawn_n + self._quick_apply_sig = signals.eventlet_pool_apply.send + + def on_stop(self): + signals.eventlet_pool_preshutdown.send(sender=self) + if self._pool is not None: + self._pool.waitall() + signals.eventlet_pool_postshutdown.send(sender=self) + + def on_apply(self, target, args=None, kwargs=None, callback=None, + accept_callback=None, **_): + self._quick_apply_sig( + sender=self, target=target, args=args, kwargs=kwargs, + ) + self._quick_put(apply_target, target, args, kwargs, + callback, accept_callback, + self.getpid) + + def grow(self, n=1): + limit = self.limit + n + self._pool.resize(limit) + self.limit = limit + + def shrink(self, n=1): + limit = self.limit - n + self._pool.resize(limit) + self.limit = limit + + def _get_info(self): + return { + 'max-concurrency': self.limit, + 'free-threads': self._pool.free(), + 'running-threads': self._pool.running(), + } diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/gevent.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/gevent.py new file mode 100644 index 0000000..f567f57 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/concurrency/gevent.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +""" + celery.concurrency.gevent + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + gevent pool implementation. 
+ +""" +from __future__ import absolute_import + +from time import time + +try: + from gevent import Timeout +except ImportError: # pragma: no cover + Timeout = None # noqa + +from celery.utils import timer2 + +from .base import apply_target, BasePool + +__all__ = ['TaskPool'] + + +def apply_timeout(target, args=(), kwargs={}, callback=None, + accept_callback=None, pid=None, timeout=None, + timeout_callback=None, Timeout=Timeout, + apply_target=apply_target, **rest): + try: + with Timeout(timeout): + return apply_target(target, args, kwargs, callback, + accept_callback, pid, + propagate=(Timeout, ), **rest) + except Timeout: + return timeout_callback(False, timeout) + + +class Schedule(timer2.Schedule): + + def __init__(self, *args, **kwargs): + from gevent.greenlet import Greenlet, GreenletExit + + class _Greenlet(Greenlet): + cancel = Greenlet.kill + + self._Greenlet = _Greenlet + self._GreenletExit = GreenletExit + super(Schedule, self).__init__(*args, **kwargs) + self._queue = set() + + def _enter(self, eta, priority, entry): + secs = max(eta - time(), 0) + g = self._Greenlet.spawn_later(secs, entry) + self._queue.add(g) + g.link(self._entry_exit) + g.entry = entry + g.eta = eta + g.priority = priority + g.canceled = False + return g + + def _entry_exit(self, g): + try: + g.kill() + finally: + self._queue.discard(g) + + def clear(self): + queue = self._queue + while queue: + try: + queue.pop().kill() + except KeyError: + pass + + @property + def queue(self): + return self._queue + + +class Timer(timer2.Timer): + Schedule = Schedule + + def ensure_started(self): + pass + + def stop(self): + self.schedule.clear() + + def start(self): + pass + + +class TaskPool(BasePool): + Timer = Timer + + signal_safe = False + is_green = True + task_join_will_block = False + + def __init__(self, *args, **kwargs): + from gevent import spawn_raw + from gevent.pool import Pool + self.Pool = Pool + self.spawn_n = spawn_raw + self.timeout = kwargs.get('timeout') + super(TaskPool, self).__init__(*args, **kwargs) + + def on_start(self): + self._pool = self.Pool(self.limit) + self._quick_put = self._pool.spawn + + def on_stop(self): + if self._pool is not None: + self._pool.join() + + def on_apply(self, target, args=None, kwargs=None, callback=None, + accept_callback=None, timeout=None, + timeout_callback=None, **_): + timeout = self.timeout if timeout is None else timeout + return self._quick_put(apply_timeout if timeout else apply_target, + target, args, kwargs, callback, accept_callback, + timeout=timeout, + timeout_callback=timeout_callback) + + def grow(self, n=1): + self._pool._semaphore.counter += n + self._pool.size += n + + def shrink(self, n=1): + self._pool._semaphore.counter -= n + self._pool.size -= n + + @property + def num_processes(self): + return len(self._pool) diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/prefork.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/prefork.py new file mode 100644 index 0000000..1771f5c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/concurrency/prefork.py @@ -0,0 +1,178 @@ +# -*- coding: utf-8 -*- +""" + celery.concurrency.prefork + ~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Pool implementation using :mod:`multiprocessing`. 
+ +""" +from __future__ import absolute_import + +import os + +from billiard import forking_enable +from billiard.pool import RUN, CLOSE, Pool as BlockingPool + +from celery import platforms +from celery import signals +from celery._state import set_default_app, _set_task_join_will_block +from celery.app import trace +from celery.concurrency.base import BasePool +from celery.five import items +from celery.utils.functional import noop +from celery.utils.log import get_logger + +from .asynpool import AsynPool + +__all__ = ['TaskPool', 'process_initializer', 'process_destructor'] + +#: List of signals to reset when a child process starts. +WORKER_SIGRESET = frozenset(['SIGTERM', + 'SIGHUP', + 'SIGTTIN', + 'SIGTTOU', + 'SIGUSR1']) + +#: List of signals to ignore when a child process starts. +WORKER_SIGIGNORE = frozenset(['SIGINT']) + +logger = get_logger(__name__) +warning, debug = logger.warning, logger.debug + + +def process_initializer(app, hostname): + """Pool child process initializer. + + This will initialize a child pool process to ensure the correct + app instance is used and things like + logging works. + + """ + _set_task_join_will_block(True) + platforms.signals.reset(*WORKER_SIGRESET) + platforms.signals.ignore(*WORKER_SIGIGNORE) + platforms.set_mp_process_title('celeryd', hostname=hostname) + # This is for Windows and other platforms not supporting + # fork(). Note that init_worker makes sure it's only + # run once per process. + app.loader.init_worker() + app.loader.init_worker_process() + logfile = os.environ.get('CELERY_LOG_FILE') or None + if logfile and '%i' in logfile.lower(): + # logfile path will differ so need to set up logging again. + app.log.already_setup = False + app.log.setup(int(os.environ.get('CELERY_LOG_LEVEL', 0) or 0), + logfile, + bool(os.environ.get('CELERY_LOG_REDIRECT', False)), + str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')), + hostname=hostname) + if os.environ.get('FORKED_BY_MULTIPROCESSING'): + # pool did execv after fork + trace.setup_worker_optimizations(app) + else: + app.set_current() + set_default_app(app) + app.finalize() + trace._tasks = app._tasks # enables fast_trace_task optimization. + # rebuild execution handler for all tasks. + from celery.app.trace import build_tracer + for name, task in items(app.tasks): + task.__trace__ = build_tracer(name, task, app.loader, hostname, + app=app) + from celery.worker import state as worker_state + worker_state.reset_state() + signals.worker_process_init.send(sender=None) + + +def process_destructor(pid, exitcode): + """Pool child process destructor + + Dispatch the :signal:`worker_process_shutdown` signal. + + """ + signals.worker_process_shutdown.send( + sender=None, pid=pid, exitcode=exitcode, + ) + + +class TaskPool(BasePool): + """Multiprocessing Pool implementation.""" + Pool = AsynPool + BlockingPool = BlockingPool + + uses_semaphore = True + write_stats = None + + def on_start(self): + """Run the task pool. + + Will pre-fork all workers so they're ready to accept tasks. 
+ + """ + forking_enable(self.forking_enable) + Pool = (self.BlockingPool if self.options.get('threads', True) + else self.Pool) + P = self._pool = Pool(processes=self.limit, + initializer=process_initializer, + on_process_exit=process_destructor, + synack=False, + **self.options) + + # Create proxy methods + self.on_apply = P.apply_async + self.maintain_pool = P.maintain_pool + self.terminate_job = P.terminate_job + self.grow = P.grow + self.shrink = P.shrink + self.flush = getattr(P, 'flush', None) # FIXME add to billiard + + def restart(self): + self._pool.restart() + self._pool.apply_async(noop) + + def did_start_ok(self): + return self._pool.did_start_ok() + + def register_with_event_loop(self, loop): + try: + reg = self._pool.register_with_event_loop + except AttributeError: + return + return reg(loop) + + def on_stop(self): + """Gracefully stop the pool.""" + if self._pool is not None and self._pool._state in (RUN, CLOSE): + self._pool.close() + self._pool.join() + self._pool = None + + def on_terminate(self): + """Force terminate the pool.""" + if self._pool is not None: + self._pool.terminate() + self._pool = None + + def on_close(self): + if self._pool is not None and self._pool._state == RUN: + self._pool.close() + + def _get_info(self): + try: + write_stats = self._pool.human_write_stats + except AttributeError: + def write_stats(): + return 'N/A' # only supported by asynpool + return { + 'max-concurrency': self.limit, + 'processes': [p.pid for p in self._pool._pool], + 'max-tasks-per-child': self._pool._maxtasksperchild or 'N/A', + 'put-guarded-by-semaphore': self.putlocks, + 'timeouts': (self._pool.soft_timeout or 0, + self._pool.timeout or 0), + 'writes': write_stats() + } + + @property + def num_processes(self): + return self._pool._processes diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/solo.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/solo.py new file mode 100644 index 0000000..a2dc199 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/concurrency/solo.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +""" + celery.concurrency.solo + ~~~~~~~~~~~~~~~~~~~~~~~ + + Single-threaded pool implementation. + +""" +from __future__ import absolute_import + +import os + +from .base import BasePool, apply_target + +__all__ = ['TaskPool'] + + +class TaskPool(BasePool): + """Solo task pool (blocking, inline, fast).""" + + def __init__(self, *args, **kwargs): + super(TaskPool, self).__init__(*args, **kwargs) + self.on_apply = apply_target + + def _get_info(self): + return {'max-concurrency': 1, + 'processes': [os.getpid()], + 'max-tasks-per-child': None, + 'put-guarded-by-semaphore': True, + 'timeouts': ()} diff --git a/thesisenv/lib/python3.6/site-packages/celery/concurrency/threads.py b/thesisenv/lib/python3.6/site-packages/celery/concurrency/threads.py new file mode 100644 index 0000000..fee901e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/concurrency/threads.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +""" + celery.concurrency.threads + ~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Pool implementation using threads. 
+ +""" +from __future__ import absolute_import + +from celery.five import UserDict + +from .base import apply_target, BasePool + +__all__ = ['TaskPool'] + + +class NullDict(UserDict): + + def __setitem__(self, key, value): + pass + + +class TaskPool(BasePool): + + def __init__(self, *args, **kwargs): + try: + import threadpool + except ImportError: + raise ImportError( + 'The threaded pool requires the threadpool module.') + self.WorkRequest = threadpool.WorkRequest + self.ThreadPool = threadpool.ThreadPool + super(TaskPool, self).__init__(*args, **kwargs) + + def on_start(self): + self._pool = self.ThreadPool(self.limit) + # threadpool stores all work requests until they are processed + # we don't need this dict, and it occupies way too much memory. + self._pool.workRequests = NullDict() + self._quick_put = self._pool.putRequest + self._quick_clear = self._pool._results_queue.queue.clear + + def on_stop(self): + self._pool.dismissWorkers(self.limit, do_join=True) + + def on_apply(self, target, args=None, kwargs=None, callback=None, + accept_callback=None, **_): + req = self.WorkRequest(apply_target, (target, args, kwargs, callback, + accept_callback)) + self._quick_put(req) + # threadpool also has callback support, + # but for some reason the callback is not triggered + # before you've collected the results. + # Clear the results (if any), so it doesn't grow too large. + self._quick_clear() + return req diff --git a/thesisenv/lib/python3.6/site-packages/celery/contrib/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/contrib/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/celery/contrib/abortable.py b/thesisenv/lib/python3.6/site-packages/celery/contrib/abortable.py new file mode 100644 index 0000000..dcdc615 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/contrib/abortable.py @@ -0,0 +1,172 @@ +# -*- coding: utf-8 -*- +""" +========================= +Abortable tasks overview +========================= + +For long-running :class:`Task`'s, it can be desirable to support +aborting during execution. Of course, these tasks should be built to +support abortion specifically. + +The :class:`AbortableTask` serves as a base class for all :class:`Task` +objects that should support abortion by producers. + +* Producers may invoke the :meth:`abort` method on + :class:`AbortableAsyncResult` instances, to request abortion. + +* Consumers (workers) should periodically check (and honor!) the + :meth:`is_aborted` method at controlled points in their task's + :meth:`run` method. The more often, the better. + +The necessary intermediate communication is dealt with by the +:class:`AbortableTask` implementation. + +Usage example +------------- + +In the consumer: + +.. code-block:: python + + from __future__ import absolute_import + + from celery.contrib.abortable import AbortableTask + from celery.utils.log import get_task_logger + + from proj.celery import app + + logger = get_logger(__name__) + + @app.task(bind=True, base=AbortableTask) + def long_running_task(self): + results = [] + for i in range(100): + # check after every 5 iterations... + # (or alternatively, check when some timer is due) + if not i % 5: + if self.is_aborted(): + # respect aborted state, and terminate gracefully. + logger.warning('Task aborted') + return + value = do_something_expensive(i) + results.append(y) + logger.info('Task complete') + return results + +In the producer: + +.. 
code-block:: python + + from __future__ import absolute_import + + import time + + from proj.tasks import MyLongRunningTask + + def myview(request): + # result is of type AbortableAsyncResult + result = long_running_task.delay() + + # abort the task after 10 seconds + time.sleep(10) + result.abort() + +After the `result.abort()` call, the task execution is not +aborted immediately. In fact, it is not guaranteed to abort at all. Keep +checking `result.state` status, or call `result.get(timeout=)` to +have it block until the task is finished. + +.. note:: + + In order to abort tasks, there needs to be communication between the + producer and the consumer. This is currently implemented through the + database backend. Therefore, this class will only work with the + database backends. + +""" +from __future__ import absolute_import + +from celery import Task +from celery.result import AsyncResult + +__all__ = ['AbortableAsyncResult', 'AbortableTask'] + + +""" +Task States +----------- + +.. state:: ABORTED + +ABORTED +~~~~~~~ + +Task is aborted (typically by the producer) and should be +aborted as soon as possible. + +""" +ABORTED = 'ABORTED' + + +class AbortableAsyncResult(AsyncResult): + """Represents a abortable result. + + Specifically, this gives the `AsyncResult` a :meth:`abort()` method, + which sets the state of the underlying Task to `'ABORTED'`. + + """ + + def is_aborted(self): + """Return :const:`True` if the task is (being) aborted.""" + return self.state == ABORTED + + def abort(self): + """Set the state of the task to :const:`ABORTED`. + + Abortable tasks monitor their state at regular intervals and + terminate execution if so. + + Be aware that invoking this method does not guarantee when the + task will be aborted (or even if the task will be aborted at + all). + + """ + # TODO: store_result requires all four arguments to be set, + # but only status should be updated here + return self.backend.store_result(self.id, result=None, + status=ABORTED, traceback=None) + + +class AbortableTask(Task): + """A celery task that serves as a base class for all :class:`Task`'s + that support aborting during execution. + + All subclasses of :class:`AbortableTask` must call the + :meth:`is_aborted` method periodically and act accordingly when + the call evaluates to :const:`True`. + + """ + abstract = True + + def AsyncResult(self, task_id): + """Return the accompanying AbortableAsyncResult instance.""" + return AbortableAsyncResult(task_id, backend=self.backend) + + def is_aborted(self, **kwargs): + """Checks against the backend whether this + :class:`AbortableAsyncResult` is :const:`ABORTED`. + + Always return :const:`False` in case the `task_id` parameter + refers to a regular (non-abortable) :class:`Task`. + + Be aware that invoking this method will cause a hit in the + backend (for example a database query), so find a good balance + between calling it regularly (for responsiveness), but not too + often (for performance). 
+ + """ + task_id = kwargs.get('task_id', self.request.id) + result = self.AsyncResult(task_id) + if not isinstance(result, AbortableAsyncResult): + return False + return result.is_aborted() diff --git a/thesisenv/lib/python3.6/site-packages/celery/contrib/batches.py b/thesisenv/lib/python3.6/site-packages/celery/contrib/batches.py new file mode 100644 index 0000000..30f0a20 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/contrib/batches.py @@ -0,0 +1,249 @@ +# -*- coding: utf-8 -*- +""" +celery.contrib.batches +====================== + +Experimental task class that buffers messages and processes them as a list. + +.. warning:: + + For this to work you have to set + :setting:`CELERYD_PREFETCH_MULTIPLIER` to zero, or some value where + the final multiplied value is higher than ``flush_every``. + + In the future we hope to add the ability to direct batching tasks + to a channel with different QoS requirements than the task channel. + +**Simple Example** + +A click counter that flushes the buffer every 100 messages, and every +10 seconds. Does not do anything with the data, but can easily be modified +to store it in a database. + +.. code-block:: python + + # Flush after 100 messages, or 10 seconds. + @app.task(base=Batches, flush_every=100, flush_interval=10) + def count_click(requests): + from collections import Counter + count = Counter(request.kwargs['url'] for request in requests) + for url, count in count.items(): + print('>>> Clicks: {0} -> {1}'.format(url, count)) + + +Then you can ask for a click to be counted by doing:: + + >>> count_click.delay(url='http://example.com') + +**Example returning results** + +An interface to the Web of Trust API that flushes the buffer every 100 +messages, and every 10 seconds. + +.. code-block:: python + + import requests + from urlparse import urlparse + + from celery.contrib.batches import Batches + + wot_api_target = 'https://api.mywot.com/0.4/public_link_json' + + @app.task(base=Batches, flush_every=100, flush_interval=10) + def wot_api(requests): + sig = lambda url: url + reponses = wot_api_real( + (sig(*request.args, **request.kwargs) for request in requests) + ) + # use mark_as_done to manually return response data + for response, request in zip(reponses, requests): + app.backend.mark_as_done(request.id, response) + + + def wot_api_real(urls): + domains = [urlparse(url).netloc for url in urls] + response = requests.get( + wot_api_target, + params={'hosts': ('/').join(set(domains)) + '/'} + ) + return [response.json()[domain] for domain in domains] + +Using the API is done as follows:: + + >>> wot_api.delay('http://example.com') + +.. note:: + + If you don't have an ``app`` instance then use the current app proxy + instead:: + + from celery import current_app + app.backend.mark_as_done(request.id, response) + +""" +from __future__ import absolute_import + +from itertools import count + +from celery.task import Task +from celery.five import Empty, Queue +from celery.utils.log import get_logger +from celery.worker.job import Request +from celery.utils import noop + +__all__ = ['Batches'] + +logger = get_logger(__name__) + + +def consume_queue(queue): + """Iterator yielding all immediately available items in a + :class:`Queue.Queue`. + + The iterator stops as soon as the queue raises :exc:`Queue.Empty`. 
+ + *Examples* + + >>> q = Queue() + >>> map(q.put, range(4)) + >>> list(consume_queue(q)) + [0, 1, 2, 3] + >>> list(consume_queue(q)) + [] + + """ + get = queue.get_nowait + while 1: + try: + yield get() + except Empty: + break + + +def apply_batches_task(task, args, loglevel, logfile): + task.push_request(loglevel=loglevel, logfile=logfile) + try: + result = task(*args) + except Exception as exc: + result = None + logger.error('Error: %r', exc, exc_info=True) + finally: + task.pop_request() + return result + + +class SimpleRequest(object): + """Pickleable request.""" + + #: task id + id = None + + #: task name + name = None + + #: positional arguments + args = () + + #: keyword arguments + kwargs = {} + + #: message delivery information. + delivery_info = None + + #: worker node name + hostname = None + + def __init__(self, id, name, args, kwargs, delivery_info, hostname): + self.id = id + self.name = name + self.args = args + self.kwargs = kwargs + self.delivery_info = delivery_info + self.hostname = hostname + + @classmethod + def from_request(cls, request): + return cls(request.id, request.name, request.args, + request.kwargs, request.delivery_info, request.hostname) + + +class Batches(Task): + abstract = True + + #: Maximum number of message in buffer. + flush_every = 10 + + #: Timeout in seconds before buffer is flushed anyway. + flush_interval = 30 + + def __init__(self): + self._buffer = Queue() + self._count = count(1) + self._tref = None + self._pool = None + + def run(self, requests): + raise NotImplementedError('must implement run(requests)') + + def Strategy(self, task, app, consumer): + self._pool = consumer.pool + hostname = consumer.hostname + eventer = consumer.event_dispatcher + Req = Request + connection_errors = consumer.connection_errors + timer = consumer.timer + put_buffer = self._buffer.put + flush_buffer = self._do_flush + + def task_message_handler(message, body, ack, reject, callbacks, **kw): + request = Req(body, on_ack=ack, app=app, hostname=hostname, + events=eventer, task=task, + connection_errors=connection_errors, + delivery_info=message.delivery_info) + put_buffer(request) + + if self._tref is None: # first request starts flush timer. + self._tref = timer.call_repeatedly( + self.flush_interval, flush_buffer, + ) + + if not next(self._count) % self.flush_every: + flush_buffer() + + return task_message_handler + + def flush(self, requests): + return self.apply_buffer(requests, ([SimpleRequest.from_request(r) + for r in requests], )) + + def _do_flush(self): + logger.debug('Batches: Wake-up to flush buffer...') + requests = None + if self._buffer.qsize(): + requests = list(consume_queue(self._buffer)) + if requests: + logger.debug('Batches: Buffer complete: %s', len(requests)) + self.flush(requests) + if not requests: + logger.debug('Batches: Canceling timer: Nothing in buffer.') + if self._tref: + self._tref.cancel() # cancel timer. 
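+ # Clearing the reference below lets the next incoming request
+ # start a fresh flush timer.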
+ self._tref = None + + def apply_buffer(self, requests, args=(), kwargs={}): + acks_late = [], [] + [acks_late[r.task.acks_late].append(r) for r in requests] + assert requests and (acks_late[True] or acks_late[False]) + + def on_accepted(pid, time_accepted): + [req.acknowledge() for req in acks_late[False]] + + def on_return(result): + [req.acknowledge() for req in acks_late[True]] + + return self._pool.apply_async( + apply_batches_task, + (self, args, 0, None), + accept_callback=on_accepted, + callback=acks_late[True] and on_return or noop, + ) diff --git a/thesisenv/lib/python3.6/site-packages/celery/contrib/methods.py b/thesisenv/lib/python3.6/site-packages/celery/contrib/methods.py new file mode 100644 index 0000000..56aa7f4 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/contrib/methods.py @@ -0,0 +1,126 @@ +# -*- coding: utf-8 -*- +""" +celery.contrib.methods +====================== + +Task decorator that supports creating tasks out of methods. + +Examples +-------- + +.. code-block:: python + + from celery.contrib.methods import task + + class X(object): + + @task() + def add(self, x, y): + return x + y + +or with any task decorator: + +.. code-block:: python + + from celery.contrib.methods import task_method + + class X(object): + + @app.task(filter=task_method) + def add(self, x, y): + return x + y + +.. note:: + + The task must use the new Task base class (:class:`celery.Task`), + and the old base class using classmethods (``celery.task.Task``, + ``celery.task.base.Task``). + + This means that you have to use the task decorator from a Celery app + instance, and not the old-API: + + .. code-block:: python + + + from celery import task # BAD + from celery.task import task # ALSO BAD + + # GOOD: + app = Celery(...) + + @app.task(filter=task_method) + def foo(self): pass + + # ALSO GOOD: + from celery import current_app + + @current_app.task(filter=task_method) + def foo(self): pass + + # ALSO GOOD: + from celery import shared_task + + @shared_task(filter=task_method) + def foo(self): pass + +Caveats +------- + +- Automatic naming won't be able to know what the class name is. + + The name will still be module_name + task_name, + so two methods with the same name in the same module will collide + so that only one task can run: + + .. code-block:: python + + class A(object): + + @task() + def add(self, x, y): + return x + y + + class B(object): + + @task() + def add(self, x, y): + return x + y + + would have to be written as: + + .. code-block:: python + + class A(object): + @task(name='A.add') + def add(self, x, y): + return x + y + + class B(object): + @task(name='B.add') + def add(self, x, y): + return x + y + +""" + +from __future__ import absolute_import + +from celery import current_app + +__all__ = ['task_method', 'task'] + + +class task_method(object): + + def __init__(self, task, *args, **kwargs): + self.task = task + + def __get__(self, obj, type=None): + if obj is None: + return self.task + task = self.task.__class__() + task.__self__ = obj + return task + + +def task(*args, **kwargs): + return current_app.task(*args, **dict(kwargs, filter=task_method)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/contrib/migrate.py b/thesisenv/lib/python3.6/site-packages/celery/contrib/migrate.py new file mode 100644 index 0000000..e4a10e9 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/contrib/migrate.py @@ -0,0 +1,365 @@ +# -*- coding: utf-8 -*- +""" + celery.contrib.migrate + ~~~~~~~~~~~~~~~~~~~~~~ + + Migration tools. 
+ +""" +from __future__ import absolute_import, print_function, unicode_literals + +import socket + +from functools import partial +from itertools import cycle, islice + +from kombu import eventloop, Queue +from kombu.common import maybe_declare +from kombu.utils.encoding import ensure_bytes + +from celery.app import app_or_default +from celery.five import string, string_t +from celery.utils import worker_direct + +__all__ = ['StopFiltering', 'State', 'republish', 'migrate_task', + 'migrate_tasks', 'move', 'task_id_eq', 'task_id_in', + 'start_filter', 'move_task_by_id', 'move_by_idmap', + 'move_by_taskmap', 'move_direct', 'move_direct_by_id'] + +MOVING_PROGRESS_FMT = """\ +Moving task {state.filtered}/{state.strtotal}: \ +{body[task]}[{body[id]}]\ +""" + + +class StopFiltering(Exception): + pass + + +class State(object): + count = 0 + filtered = 0 + total_apx = 0 + + @property + def strtotal(self): + if not self.total_apx: + return '?' + return string(self.total_apx) + + def __repr__(self): + if self.filtered: + return '^{0.filtered}'.format(self) + return '{0.count}/{0.strtotal}'.format(self) + + +def republish(producer, message, exchange=None, routing_key=None, + remove_props=['application_headers', + 'content_type', + 'content_encoding', + 'headers']): + body = ensure_bytes(message.body) # use raw message body. + info, headers, props = (message.delivery_info, + message.headers, message.properties) + exchange = info['exchange'] if exchange is None else exchange + routing_key = info['routing_key'] if routing_key is None else routing_key + ctype, enc = message.content_type, message.content_encoding + # remove compression header, as this will be inserted again + # when the message is recompressed. + compression = headers.pop('compression', None) + + for key in remove_props: + props.pop(key, None) + + producer.publish(ensure_bytes(body), exchange=exchange, + routing_key=routing_key, compression=compression, + headers=headers, content_type=ctype, + content_encoding=enc, **props) + + +def migrate_task(producer, body_, message, queues=None): + info = message.delivery_info + queues = {} if queues is None else queues + republish(producer, message, + exchange=queues.get(info['exchange']), + routing_key=queues.get(info['routing_key'])) + + +def filter_callback(callback, tasks): + + def filtered(body, message): + if tasks and body['task'] not in tasks: + return + + return callback(body, message) + return filtered + + +def migrate_tasks(source, dest, migrate=migrate_task, app=None, + queues=None, **kwargs): + app = app_or_default(app) + queues = prepare_queues(queues) + producer = app.amqp.TaskProducer(dest) + migrate = partial(migrate, producer, queues=queues) + + def on_declare_queue(queue): + new_queue = queue(producer.channel) + new_queue.name = queues.get(queue.name, queue.name) + if new_queue.routing_key == queue.name: + new_queue.routing_key = queues.get(queue.name, + new_queue.routing_key) + if new_queue.exchange.name == queue.name: + new_queue.exchange.name = queues.get(queue.name, queue.name) + new_queue.declare() + + return start_filter(app, source, migrate, queues=queues, + on_declare_queue=on_declare_queue, **kwargs) + + +def _maybe_queue(app, q): + if isinstance(q, string_t): + return app.amqp.queues[q] + return q + + +def move(predicate, connection=None, exchange=None, routing_key=None, + source=None, app=None, callback=None, limit=None, transform=None, + **kwargs): + """Find tasks by filtering them and move the tasks to a new queue. 
+ + :param predicate: Filter function used to decide which messages + to move. Must accept the standard signature of ``(body, message)`` + used by Kombu consumer callbacks. If the predicate wants the message + to be moved it must return either: + + 1) a tuple of ``(exchange, routing_key)``, or + + 2) a :class:`~kombu.entity.Queue` instance, or + + 3) any other true value which means the specified + ``exchange`` and ``routing_key`` arguments will be used. + + :keyword connection: Custom connection to use. + :keyword source: Optional list of source queues to use instead of the + default (which is the queues in :setting:`CELERY_QUEUES`). + This list can also contain new :class:`~kombu.entity.Queue` instances. + :keyword exchange: Default destination exchange. + :keyword routing_key: Default destination routing key. + :keyword limit: Limit number of messages to filter. + :keyword callback: Callback called after message moved, + with signature ``(state, body, message)``. + :keyword transform: Optional function to transform the return + value (destination) of the filter function. + + Also supports the same keyword arguments as :func:`start_filter`. + + To demonstrate, the :func:`move_task_by_id` operation can be implemented + like this: + + .. code-block:: python + + def is_wanted_task(body, message): + if body['id'] == wanted_id: + return Queue('foo', exchange=Exchange('foo'), + routing_key='foo') + + move(is_wanted_task) + + or with a transform: + + .. code-block:: python + + def transform(value): + if isinstance(value, string_t): + return Queue(value, Exchange(value), value) + return value + + move(is_wanted_task, transform=transform) + + The predicate may also return a tuple of ``(exchange, routing_key)`` + to specify the destination to where the task should be moved, + or a :class:`~kombu.entitiy.Queue` instance. + Any other true value means that the task will be moved to the + default exchange/routing_key. 
+ + """ + app = app_or_default(app) + queues = [_maybe_queue(app, queue) for queue in source or []] or None + with app.connection_or_acquire(connection, pool=False) as conn: + producer = app.amqp.TaskProducer(conn) + state = State() + + def on_task(body, message): + ret = predicate(body, message) + if ret: + if transform: + ret = transform(ret) + if isinstance(ret, Queue): + maybe_declare(ret, conn.default_channel) + ex, rk = ret.exchange.name, ret.routing_key + else: + ex, rk = expand_dest(ret, exchange, routing_key) + republish(producer, message, + exchange=ex, routing_key=rk) + message.ack() + + state.filtered += 1 + if callback: + callback(state, body, message) + if limit and state.filtered >= limit: + raise StopFiltering() + + return start_filter(app, conn, on_task, consume_from=queues, **kwargs) + + +def expand_dest(ret, exchange, routing_key): + try: + ex, rk = ret + except (TypeError, ValueError): + ex, rk = exchange, routing_key + return ex, rk + + +def task_id_eq(task_id, body, message): + return body['id'] == task_id + + +def task_id_in(ids, body, message): + return body['id'] in ids + + +def prepare_queues(queues): + if isinstance(queues, string_t): + queues = queues.split(',') + if isinstance(queues, list): + queues = dict(tuple(islice(cycle(q.split(':')), None, 2)) + for q in queues) + if queues is None: + queues = {} + return queues + + +def start_filter(app, conn, filter, limit=None, timeout=1.0, + ack_messages=False, tasks=None, queues=None, + callback=None, forever=False, on_declare_queue=None, + consume_from=None, state=None, accept=None, **kwargs): + state = state or State() + queues = prepare_queues(queues) + consume_from = [_maybe_queue(app, q) + for q in consume_from or list(queues)] + if isinstance(tasks, string_t): + tasks = set(tasks.split(',')) + if tasks is None: + tasks = set([]) + + def update_state(body, message): + state.count += 1 + if limit and state.count >= limit: + raise StopFiltering() + + def ack_message(body, message): + message.ack() + + consumer = app.amqp.TaskConsumer(conn, queues=consume_from, accept=accept) + + if tasks: + filter = filter_callback(filter, tasks) + update_state = filter_callback(update_state, tasks) + ack_message = filter_callback(ack_message, tasks) + + consumer.register_callback(filter) + consumer.register_callback(update_state) + if ack_messages: + consumer.register_callback(ack_message) + if callback is not None: + callback = partial(callback, state) + if tasks: + callback = filter_callback(callback, tasks) + consumer.register_callback(callback) + + # declare all queues on the new broker. + for queue in consumer.queues: + if queues and queue.name not in queues: + continue + if on_declare_queue is not None: + on_declare_queue(queue) + try: + _, mcount, _ = queue(consumer.channel).queue_declare(passive=True) + if mcount: + state.total_apx += mcount + except conn.channel_errors: + pass + + # start migrating messages. + with consumer: + try: + for _ in eventloop(conn, # pragma: no cover + timeout=timeout, ignore_timeouts=forever): + pass + except socket.timeout: + pass + except StopFiltering: + pass + return state + + +def move_task_by_id(task_id, dest, **kwargs): + """Find a task by id and move it to another queue. + + :param task_id: Id of task to move. + :param dest: Destination queue. + + Also supports the same keyword arguments as :func:`move`. 
+ + """ + return move_by_idmap({task_id: dest}, **kwargs) + + +def move_by_idmap(map, **kwargs): + """Moves tasks by matching from a ``task_id: queue`` mapping, + where ``queue`` is a queue to move the task to. + + Example:: + + >>> move_by_idmap({ + ... '5bee6e82-f4ac-468e-bd3d-13e8600250bc': Queue('name'), + ... 'ada8652d-aef3-466b-abd2-becdaf1b82b3': Queue('name'), + ... '3a2b140d-7db1-41ba-ac90-c36a0ef4ab1f': Queue('name')}, + ... queues=['hipri']) + + """ + def task_id_in_map(body, message): + return map.get(body['id']) + + # adding the limit means that we don't have to consume any more + # when we've found everything. + return move(task_id_in_map, limit=len(map), **kwargs) + + +def move_by_taskmap(map, **kwargs): + """Moves tasks by matching from a ``task_name: queue`` mapping, + where ``queue`` is the queue to move the task to. + + Example:: + + >>> move_by_taskmap({ + ... 'tasks.add': Queue('name'), + ... 'tasks.mul': Queue('name'), + ... }) + + """ + + def task_name_in_map(body, message): + return map.get(body['task']) # <- name of task + + return move(task_name_in_map, **kwargs) + + +def filter_status(state, body, message, **kwargs): + print(MOVING_PROGRESS_FMT.format(state=state, body=body, **kwargs)) + + +move_direct = partial(move, transform=worker_direct) +move_direct_by_id = partial(move_task_by_id, transform=worker_direct) +move_direct_by_idmap = partial(move_by_idmap, transform=worker_direct) +move_direct_by_taskmap = partial(move_by_taskmap, transform=worker_direct) diff --git a/thesisenv/lib/python3.6/site-packages/celery/contrib/rdb.py b/thesisenv/lib/python3.6/site-packages/celery/contrib/rdb.py new file mode 100644 index 0000000..3f218ae --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/contrib/rdb.py @@ -0,0 +1,183 @@ +# -*- coding: utf-8 -*- +""" +celery.contrib.rdb +================== + +Remote debugger for Celery tasks running in multiprocessing pool workers. +Inspired by http://snippets.dzone.com/posts/show/7248 + +**Usage** + +.. code-block:: python + + from celery.contrib import rdb + from celery import task + + @task() + def add(x, y): + result = x + y + rdb.set_trace() + return result + + +**Environment Variables** + +.. envvar:: CELERY_RDB_HOST + + Hostname to bind to. Default is '127.0.01', which means the socket + will only be accessible from the local host. + +.. envvar:: CELERY_RDB_PORT + + Base port to bind to. Default is 6899. + The debugger will try to find an available port starting from the + base port. The selected port will be logged by the worker. + +""" +from __future__ import absolute_import, print_function + +import errno +import os +import socket +import sys + +from pdb import Pdb + +from billiard import current_process + +from celery.five import range + +__all__ = ['CELERY_RDB_HOST', 'CELERY_RDB_PORT', 'default_port', + 'Rdb', 'debugger', 'set_trace'] + +default_port = 6899 + +CELERY_RDB_HOST = os.environ.get('CELERY_RDB_HOST') or '127.0.0.1' +CELERY_RDB_PORT = int(os.environ.get('CELERY_RDB_PORT') or default_port) + +#: Holds the currently active debugger. +_current = [None] + +_frame = getattr(sys, '_getframe') + +NO_AVAILABLE_PORT = """\ +{self.ident}: Couldn't find an available port. + +Please specify one using the CELERY_RDB_PORT environment variable. +""" + +BANNER = """\ +{self.ident}: Please telnet into {self.host} {self.port}. + +Type `exit` in session to continue. + +{self.ident}: Waiting for client... +""" + +SESSION_STARTED = '{self.ident}: Now in session with {self.remote_addr}.' 
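+# Usage sketch (illustrative, mirroring the docstring at the top of this
+# module; ``app`` is assumed to be an existing Celery application):
+#
+#     from celery.contrib import rdb
+#
+#     @app.task()
+#     def add(x, y):
+#         result = x + y
+#         rdb.set_trace()   # the worker logs the port it listens on
+#         return result
+#
+# Connect with ``telnet <CELERY_RDB_HOST> <logged port>`` and type ``exit``
+# in the session to let the task continue.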
+SESSION_ENDED = '{self.ident}: Session with {self.remote_addr} ended.' + + +class Rdb(Pdb): + me = 'Remote Debugger' + _prev_outs = None + _sock = None + + def __init__(self, host=CELERY_RDB_HOST, port=CELERY_RDB_PORT, + port_search_limit=100, port_skew=+0, out=sys.stdout): + self.active = True + self.out = out + + self._prev_handles = sys.stdin, sys.stdout + + self._sock, this_port = self.get_avail_port( + host, port, port_search_limit, port_skew, + ) + self._sock.setblocking(1) + self._sock.listen(1) + self.ident = '{0}:{1}'.format(self.me, this_port) + self.host = host + self.port = this_port + self.say(BANNER.format(self=self)) + + self._client, address = self._sock.accept() + self._client.setblocking(1) + self.remote_addr = ':'.join(str(v) for v in address) + self.say(SESSION_STARTED.format(self=self)) + self._handle = sys.stdin = sys.stdout = self._client.makefile('rw') + Pdb.__init__(self, completekey='tab', + stdin=self._handle, stdout=self._handle) + + def get_avail_port(self, host, port, search_limit=100, skew=+0): + try: + _, skew = current_process().name.split('-') + skew = int(skew) + except ValueError: + pass + this_port = None + for i in range(search_limit): + _sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + this_port = port + skew + i + try: + _sock.bind((host, this_port)) + except socket.error as exc: + if exc.errno in [errno.EADDRINUSE, errno.EINVAL]: + continue + raise + else: + return _sock, this_port + else: + raise Exception(NO_AVAILABLE_PORT.format(self=self)) + + def say(self, m): + print(m, file=self.out) + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self._close_session() + + def _close_session(self): + self.stdin, self.stdout = sys.stdin, sys.stdout = self._prev_handles + if self.active: + if self._handle is not None: + self._handle.close() + if self._client is not None: + self._client.close() + if self._sock is not None: + self._sock.close() + self.active = False + self.say(SESSION_ENDED.format(self=self)) + + def do_continue(self, arg): + self._close_session() + self.set_continue() + return 1 + do_c = do_cont = do_continue + + def do_quit(self, arg): + self._close_session() + self.set_quit() + return 1 + do_q = do_exit = do_quit + + def set_quit(self): + # this raises a BdbQuit exception that we are unable to catch. + sys.settrace(None) + + +def debugger(): + """Return the current debugger instance (if any), + or creates a new one.""" + rdb = _current[0] + if rdb is None or not rdb.active: + rdb = _current[0] = Rdb() + return rdb + + +def set_trace(frame=None): + """Set breakpoint at current location, or a specified frame""" + if frame is None: + frame = _frame().f_back + return debugger().set_trace(frame) diff --git a/thesisenv/lib/python3.6/site-packages/celery/contrib/sphinx.py b/thesisenv/lib/python3.6/site-packages/celery/contrib/sphinx.py new file mode 100644 index 0000000..2e57431 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/contrib/sphinx.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- +""" +celery.contrib.sphinx +===================== + +Sphinx documentation plugin + +**Usage** + +Add the extension to your :file:`docs/conf.py` configuration module: + +.. code-block:: python + + extensions = (..., + 'celery.contrib.sphinx') + +If you would like to change the prefix for tasks in reference documentation +then you can change the ``celery_task_prefix`` configuration value: + +.. 
code-block:: python + + celery_task_prefix = '(task)' # < default + + +With the extension installed `autodoc` will automatically find +task decorated objects and generate the correct (as well as +add a ``(task)`` prefix), and you can also refer to the tasks +using `:task:proj.tasks.add` syntax. + +Use ``.. autotask::`` to manually document a task. + +""" +from __future__ import absolute_import + +try: + from inspect import formatargspec, getfullargspec as getargspec +except ImportError: # Py2 + from inspect import formatargspec, getargspec # noqa + +from sphinx.domains.python import PyModulelevel +from sphinx.ext.autodoc import FunctionDocumenter + +from celery.app.task import BaseTask + + +class TaskDocumenter(FunctionDocumenter): + objtype = 'task' + member_order = 11 + + @classmethod + def can_document_member(cls, member, membername, isattr, parent): + return isinstance(member, BaseTask) and getattr(member, '__wrapped__') + + def format_args(self): + wrapped = getattr(self.object, '__wrapped__') + if wrapped is not None: + argspec = getargspec(wrapped) + fmt = formatargspec(*argspec) + fmt = fmt.replace('\\', '\\\\') + return fmt + return '' + + def document_members(self, all_members=False): + pass + + +class TaskDirective(PyModulelevel): + + def get_signature_prefix(self, sig): + return self.env.config.celery_task_prefix + + +def setup(app): + app.add_autodocumenter(TaskDocumenter) + app.domains['py'].directives['task'] = TaskDirective + app.add_config_value('celery_task_prefix', '(task)', True) diff --git a/thesisenv/lib/python3.6/site-packages/celery/datastructures.py b/thesisenv/lib/python3.6/site-packages/celery/datastructures.py new file mode 100644 index 0000000..32a1d54 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/datastructures.py @@ -0,0 +1,671 @@ +# -*- coding: utf-8 -*- +""" + celery.datastructures + ~~~~~~~~~~~~~~~~~~~~~ + + Custom types and data structures. 
+ +""" +from __future__ import absolute_import, print_function, unicode_literals + +import sys +import time + +from collections import defaultdict, Mapping, MutableMapping, MutableSet +from heapq import heapify, heappush, heappop +from functools import partial +from itertools import chain + +from billiard.einfo import ExceptionInfo # noqa +from kombu.utils.encoding import safe_str +from kombu.utils.limits import TokenBucket # noqa + +from celery.five import items +from celery.utils.functional import LRUCache, first, uniq # noqa + +try: + from django.utils.functional import LazyObject, LazySettings +except ImportError: + class LazyObject(object): # noqa + pass + LazySettings = LazyObject # noqa + +DOT_HEAD = """ +{IN}{type} {id} {{ +{INp}graph [{attrs}] +""" +DOT_ATTR = '{name}={value}' +DOT_NODE = '{INp}"{0}" [{attrs}]' +DOT_EDGE = '{INp}"{0}" {dir} "{1}" [{attrs}]' +DOT_ATTRSEP = ', ' +DOT_DIRS = {'graph': '--', 'digraph': '->'} +DOT_TAIL = '{IN}}}' + +__all__ = ['GraphFormatter', 'CycleError', 'DependencyGraph', + 'AttributeDictMixin', 'AttributeDict', 'DictAttribute', + 'ConfigurationView', 'LimitedSet'] + + +def force_mapping(m): + if isinstance(m, (LazyObject, LazySettings)): + m = m._wrapped + return DictAttribute(m) if not isinstance(m, Mapping) else m + + +class GraphFormatter(object): + _attr = DOT_ATTR.strip() + _node = DOT_NODE.strip() + _edge = DOT_EDGE.strip() + _head = DOT_HEAD.strip() + _tail = DOT_TAIL.strip() + _attrsep = DOT_ATTRSEP + _dirs = dict(DOT_DIRS) + + scheme = { + 'shape': 'box', + 'arrowhead': 'vee', + 'style': 'filled', + 'fontname': 'HelveticaNeue', + } + edge_scheme = { + 'color': 'darkseagreen4', + 'arrowcolor': 'black', + 'arrowsize': 0.7, + } + node_scheme = {'fillcolor': 'palegreen3', 'color': 'palegreen4'} + term_scheme = {'fillcolor': 'palegreen1', 'color': 'palegreen2'} + graph_scheme = {'bgcolor': 'mintcream'} + + def __init__(self, root=None, type=None, id=None, + indent=0, inw=' ' * 4, **scheme): + self.id = id or 'dependencies' + self.root = root + self.type = type or 'digraph' + self.direction = self._dirs[self.type] + self.IN = inw * (indent or 0) + self.INp = self.IN + inw + self.scheme = dict(self.scheme, **scheme) + self.graph_scheme = dict(self.graph_scheme, root=self.label(self.root)) + + def attr(self, name, value): + value = '"{0}"'.format(value) + return self.FMT(self._attr, name=name, value=value) + + def attrs(self, d, scheme=None): + d = dict(self.scheme, **dict(scheme, **d or {}) if scheme else d) + return self._attrsep.join( + safe_str(self.attr(k, v)) for k, v in items(d) + ) + + def head(self, **attrs): + return self.FMT( + self._head, id=self.id, type=self.type, + attrs=self.attrs(attrs, self.graph_scheme), + ) + + def tail(self): + return self.FMT(self._tail) + + def label(self, obj): + return obj + + def node(self, obj, **attrs): + return self.draw_node(obj, self.node_scheme, attrs) + + def terminal_node(self, obj, **attrs): + return self.draw_node(obj, self.term_scheme, attrs) + + def edge(self, a, b, **attrs): + return self.draw_edge(a, b, **attrs) + + def _enc(self, s): + return s.encode('utf-8', 'ignore') + + def FMT(self, fmt, *args, **kwargs): + return self._enc(fmt.format( + *args, **dict(kwargs, IN=self.IN, INp=self.INp) + )) + + def draw_edge(self, a, b, scheme=None, attrs=None): + return self.FMT( + self._edge, self.label(a), self.label(b), + dir=self.direction, attrs=self.attrs(attrs, self.edge_scheme), + ) + + def draw_node(self, obj, scheme=None, attrs=None): + return self.FMT( + self._node, self.label(obj), 
attrs=self.attrs(attrs, scheme), + ) + + +class CycleError(Exception): + """A cycle was detected in an acyclic graph.""" + + +class DependencyGraph(object): + """A directed acyclic graph of objects and their dependencies. + + Supports a robust topological sort + to detect the order in which they must be handled. + + Takes an optional iterator of ``(obj, dependencies)`` + tuples to build the graph from. + + .. warning:: + + Does not support cycle detection. + + """ + + def __init__(self, it=None, formatter=None): + self.formatter = formatter or GraphFormatter() + self.adjacent = {} + if it is not None: + self.update(it) + + def add_arc(self, obj): + """Add an object to the graph.""" + self.adjacent.setdefault(obj, []) + + def add_edge(self, A, B): + """Add an edge from object ``A`` to object ``B`` + (``A`` depends on ``B``).""" + self[A].append(B) + + def connect(self, graph): + """Add nodes from another graph.""" + self.adjacent.update(graph.adjacent) + + def topsort(self): + """Sort the graph topologically. + + :returns: a list of objects in the order + in which they must be handled. + + """ + graph = DependencyGraph() + components = self._tarjan72() + + NC = dict((node, component) + for component in components + for node in component) + for component in components: + graph.add_arc(component) + for node in self: + node_c = NC[node] + for successor in self[node]: + successor_c = NC[successor] + if node_c != successor_c: + graph.add_edge(node_c, successor_c) + return [t[0] for t in graph._khan62()] + + def valency_of(self, obj): + """Return the valency (degree) of a vertex in the graph.""" + try: + l = [len(self[obj])] + except KeyError: + return 0 + for node in self[obj]: + l.append(self.valency_of(node)) + return sum(l) + + def update(self, it): + """Update the graph with data from a list + of ``(obj, dependencies)`` tuples.""" + tups = list(it) + for obj, _ in tups: + self.add_arc(obj) + for obj, deps in tups: + for dep in deps: + self.add_edge(obj, dep) + + def edges(self): + """Return generator that yields for all edges in the graph.""" + return (obj for obj, adj in items(self) if adj) + + def _khan62(self): + """Khans simple topological sort algorithm from '62 + + See http://en.wikipedia.org/wiki/Topological_sorting + + """ + count = defaultdict(lambda: 0) + result = [] + + for node in self: + for successor in self[node]: + count[successor] += 1 + ready = [node for node in self if not count[node]] + + while ready: + node = ready.pop() + result.append(node) + + for successor in self[node]: + count[successor] -= 1 + if count[successor] == 0: + ready.append(successor) + result.reverse() + return result + + def _tarjan72(self): + """Tarjan's algorithm to find strongly connected components. + + See http://bit.ly/vIMv3h. + + """ + result, stack, low = [], [], {} + + def visit(node): + if node in low: + return + num = len(low) + low[node] = num + stack_pos = len(stack) + stack.append(node) + + for successor in self[node]: + visit(successor) + low[node] = min(low[node], low[successor]) + + if num == low[node]: + component = tuple(stack[stack_pos:]) + stack[stack_pos:] = [] + result.append(component) + for item in component: + low[item] = len(self) + + for node in self: + visit(node) + + return result + + def to_dot(self, fh, formatter=None): + """Convert the graph to DOT format. + + :param fh: A file, or a file-like object to write the graph to. 
+ + """ + seen = set() + draw = formatter or self.formatter + P = partial(print, file=fh) + + def if_not_seen(fun, obj): + if draw.label(obj) not in seen: + P(fun(obj)) + seen.add(draw.label(obj)) + + P(draw.head()) + for obj, adjacent in items(self): + if not adjacent: + if_not_seen(draw.terminal_node, obj) + for req in adjacent: + if_not_seen(draw.node, obj) + P(draw.edge(obj, req)) + P(draw.tail()) + + def format(self, obj): + return self.formatter(obj) if self.formatter else obj + + def __iter__(self): + return iter(self.adjacent) + + def __getitem__(self, node): + return self.adjacent[node] + + def __len__(self): + return len(self.adjacent) + + def __contains__(self, obj): + return obj in self.adjacent + + def _iterate_items(self): + return items(self.adjacent) + items = iteritems = _iterate_items + + def __repr__(self): + return '\n'.join(self.repr_node(N) for N in self) + + def repr_node(self, obj, level=1, fmt='{0}({1})'): + output = [fmt.format(obj, self.valency_of(obj))] + if obj in self: + for other in self[obj]: + d = fmt.format(other, self.valency_of(other)) + output.append(' ' * level + d) + output.extend(self.repr_node(other, level + 1).split('\n')[1:]) + return '\n'.join(output) + + +class AttributeDictMixin(object): + """Augment classes with a Mapping interface by adding attribute access. + + I.e. `d.key -> d[key]`. + + """ + + def __getattr__(self, k): + """`d.key -> d[key]`""" + try: + return self[k] + except KeyError: + raise AttributeError( + '{0!r} object has no attribute {1!r}'.format( + type(self).__name__, k)) + + def __setattr__(self, key, value): + """`d[key] = value -> d.key = value`""" + self[key] = value + + +class AttributeDict(dict, AttributeDictMixin): + """Dict subclass with attribute access.""" + pass + + +class DictAttribute(object): + """Dict interface to attributes. + + `obj[k] -> obj.k` + `obj[k] = val -> obj.k = val` + + """ + obj = None + + def __init__(self, obj): + object.__setattr__(self, 'obj', obj) + + def __getattr__(self, key): + return getattr(self.obj, key) + + def __setattr__(self, key, value): + return setattr(self.obj, key, value) + + def get(self, key, default=None): + try: + return self[key] + except KeyError: + return default + + def setdefault(self, key, default): + try: + return self[key] + except KeyError: + self[key] = default + return default + + def __getitem__(self, key): + try: + return getattr(self.obj, key) + except AttributeError: + raise KeyError(key) + + def __setitem__(self, key, value): + setattr(self.obj, key, value) + + def __contains__(self, key): + return hasattr(self.obj, key) + + def _iterate_keys(self): + return iter(dir(self.obj)) + iterkeys = _iterate_keys + + def __iter__(self): + return self._iterate_keys() + + def _iterate_items(self): + for key in self._iterate_keys(): + yield key, getattr(self.obj, key) + iteritems = _iterate_items + + def _iterate_values(self): + for key in self._iterate_keys(): + yield getattr(self.obj, key) + itervalues = _iterate_values + + if sys.version_info[0] == 3: # pragma: no cover + items = _iterate_items + keys = _iterate_keys + values = _iterate_values + else: + + def keys(self): + return list(self) + + def items(self): + return list(self._iterate_items()) + + def values(self): + return list(self._iterate_values()) +MutableMapping.register(DictAttribute) + + +class ConfigurationView(AttributeDictMixin): + """A view over an applications configuration dicts. + + Custom (but older) version of :class:`collections.ChainMap`. 
+ + If the key does not exist in ``changes``, the ``defaults`` dicts + are consulted. + + :param changes: Dict containing changes to the configuration. + :param defaults: List of dicts containing the default configuration. + + """ + changes = None + defaults = None + _order = None + + def __init__(self, changes, defaults): + self.__dict__.update(changes=changes, defaults=defaults, + _order=[changes] + defaults) + + def add_defaults(self, d): + d = force_mapping(d) + self.defaults.insert(0, d) + self._order.insert(1, d) + + def __getitem__(self, key): + for d in self._order: + try: + return d[key] + except KeyError: + pass + raise KeyError(key) + + def __setitem__(self, key, value): + self.changes[key] = value + + def first(self, *keys): + return first(None, (self.get(key) for key in keys)) + + def get(self, key, default=None): + try: + return self[key] + except KeyError: + return default + + def clear(self): + """Remove all changes, but keep defaults.""" + self.changes.clear() + + def setdefault(self, key, default): + try: + return self[key] + except KeyError: + self[key] = default + return default + + def update(self, *args, **kwargs): + return self.changes.update(*args, **kwargs) + + def __contains__(self, key): + return any(key in m for m in self._order) + + def __bool__(self): + return any(self._order) + __nonzero__ = __bool__ # Py2 + + def __repr__(self): + return repr(dict(items(self))) + + def __iter__(self): + return self._iterate_keys() + + def __len__(self): + # The logic for iterating keys includes uniq(), + # so to be safe we count by explicitly iterating + return len(set().union(*self._order)) + + def _iter(self, op): + # defaults must be first in the stream, so values in + # changes takes precedence. + return chain(*[op(d) for d in reversed(self._order)]) + + def _iterate_keys(self): + return uniq(self._iter(lambda d: d)) + iterkeys = _iterate_keys + + def _iterate_items(self): + return ((key, self[key]) for key in self) + iteritems = _iterate_items + + def _iterate_values(self): + return (self[key] for key in self) + itervalues = _iterate_values + + if sys.version_info[0] == 3: # pragma: no cover + keys = _iterate_keys + items = _iterate_items + values = _iterate_values + + else: # noqa + def keys(self): + return list(self._iterate_keys()) + + def items(self): + return list(self._iterate_items()) + + def values(self): + return list(self._iterate_values()) + +MutableMapping.register(ConfigurationView) + + +class LimitedSet(object): + """Kind-of Set with limitations. + + Good for when you need to test for membership (`a in set`), + but the set should not grow unbounded. + + :keyword maxlen: Maximum number of members before we start + evicting expired members. + :keyword expires: Time in seconds, before a membership expires. + + """ + + def __init__(self, maxlen=None, expires=None, data=None, heap=None): + # heap is ignored + self.maxlen = maxlen + self.expires = expires + self._data = {} if data is None else data + self._heap = [] + + # make shortcuts + self.__len__ = self._heap.__len__ + self.__contains__ = self._data.__contains__ + + self._refresh_heap() + + def _refresh_heap(self): + self._heap[:] = [(t, key) for key, t in items(self._data)] + heapify(self._heap) + + def add(self, key, now=time.time, heappush=heappush): + """Add a new member.""" + # offset is there to modify the length of the list, + # this way we can expire an item before inserting the value, + # and it will end up in the correct order. 
+ self.purge(1, offset=1) + inserted = now() + self._data[key] = inserted + heappush(self._heap, (inserted, key)) + + def clear(self): + """Remove all members""" + self._data.clear() + self._heap[:] = [] + + def discard(self, value): + """Remove membership by finding value.""" + try: + itime = self._data[value] + except KeyError: + return + try: + self._heap.remove((itime, value)) + except ValueError: + pass + self._data.pop(value, None) + pop_value = discard # XXX compat + + def purge(self, limit=None, offset=0, now=time.time): + """Purge expired items.""" + H, maxlen = self._heap, self.maxlen + if not maxlen: + return + + # If the data/heap gets corrupted and limit is None + # this will go into an infinite loop, so limit must + # have a value to guard the loop. + limit = len(self) + offset if limit is None else limit + + i = 0 + while len(self) + offset > maxlen: + if i >= limit: + break + try: + item = heappop(H) + except IndexError: + break + if self.expires: + if now() < item[0] + self.expires: + heappush(H, item) + break + try: + self._data.pop(item[1]) + except KeyError: # out of sync with heap + pass + i += 1 + + def update(self, other): + if isinstance(other, LimitedSet): + self._data.update(other._data) + self._refresh_heap() + else: + for obj in other: + self.add(obj) + + def as_dict(self): + return self._data + + def __eq__(self, other): + return self._heap == other._heap + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return 'LimitedSet({0})'.format(len(self)) + + def __iter__(self): + return (item[1] for item in self._heap) + + def __len__(self): + return len(self._heap) + + def __contains__(self, key): + return key in self._data + + def __reduce__(self): + return self.__class__, (self.maxlen, self.expires, self._data) +MutableSet.register(LimitedSet) diff --git a/thesisenv/lib/python3.6/site-packages/celery/events/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/events/__init__.py new file mode 100644 index 0000000..65809cf --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/events/__init__.py @@ -0,0 +1,408 @@ +# -*- coding: utf-8 -*- +""" + celery.events + ~~~~~~~~~~~~~ + + Events is a stream of messages sent for certain actions occurring + in the worker (and clients if :setting:`CELERY_SEND_TASK_SENT_EVENT` + is enabled), used for monitoring purposes. + +""" +from __future__ import absolute_import + +import os +import time +import threading +import warnings + +from collections import deque +from contextlib import contextmanager +from copy import copy +from operator import itemgetter + +from kombu import Exchange, Queue, Producer +from kombu.connection import maybe_channel +from kombu.mixins import ConsumerMixin +from kombu.utils import cached_property + +from celery.app import app_or_default +from celery.utils import anon_nodename, uuid +from celery.utils.functional import dictfilter +from celery.utils.timeutils import adjust_timestamp, utcoffset, maybe_s_to_ms + +__all__ = ['Events', 'Event', 'EventDispatcher', 'EventReceiver'] + +event_exchange = Exchange('celeryev', type='topic') + +_TZGETTER = itemgetter('utcoffset', 'timestamp') + +W_YAJL = """ +anyjson is currently using the yajl library. +This json implementation is broken, it severely truncates floats +so timestamps will not work. + +Please uninstall yajl or force anyjson to use a different library. 
+""" + +CLIENT_CLOCK_SKEW = -1 + + +def get_exchange(conn): + ex = copy(event_exchange) + if conn.transport.driver_type == 'redis': + # quick hack for Issue #436 + ex.type = 'fanout' + return ex + + +def Event(type, _fields=None, __dict__=dict, __now__=time.time, **fields): + """Create an event. + + An event is a dictionary, the only required field is ``type``. + A ``timestamp`` field will be set to the current time if not provided. + + """ + event = __dict__(_fields, **fields) if _fields else fields + if 'timestamp' not in event: + event.update(timestamp=__now__(), type=type) + else: + event['type'] = type + return event + + +def group_from(type): + """Get the group part of an event type name. + + E.g.:: + + >>> group_from('task-sent') + 'task' + + >>> group_from('custom-my-event') + 'custom' + + """ + return type.split('-', 1)[0] + + +class EventDispatcher(object): + """Dispatches event messages. + + :param connection: Connection to the broker. + + :keyword hostname: Hostname to identify ourselves as, + by default uses the hostname returned by + :func:`~celery.utils.anon_nodename`. + + :keyword groups: List of groups to send events for. :meth:`send` will + ignore send requests to groups not in this list. + If this is :const:`None`, all events will be sent. Example groups + include ``"task"`` and ``"worker"``. + + :keyword enabled: Set to :const:`False` to not actually publish any events, + making :meth:`send` a noop operation. + + :keyword channel: Can be used instead of `connection` to specify + an exact channel to use when sending events. + + :keyword buffer_while_offline: If enabled events will be buffered + while the connection is down. :meth:`flush` must be called + as soon as the connection is re-established. + + You need to :meth:`close` this after use. + + """ + DISABLED_TRANSPORTS = set(['sql']) + + app = None + + # set of callbacks to be called when :meth:`enabled`. + on_enabled = None + + # set of callbacks to be called when :meth:`disabled`. 
+ on_disabled = None + + def __init__(self, connection=None, hostname=None, enabled=True, + channel=None, buffer_while_offline=True, app=None, + serializer=None, groups=None): + self.app = app_or_default(app or self.app) + self.connection = connection + self.channel = channel + self.hostname = hostname or anon_nodename() + self.buffer_while_offline = buffer_while_offline + self.mutex = threading.Lock() + self.producer = None + self._outbound_buffer = deque() + self.serializer = serializer or self.app.conf.CELERY_EVENT_SERIALIZER + self.on_enabled = set() + self.on_disabled = set() + self.groups = set(groups or []) + self.tzoffset = [-time.timezone, -time.altzone] + self.clock = self.app.clock + if not connection and channel: + self.connection = channel.connection.client + self.enabled = enabled + conninfo = self.connection or self.app.connection() + self.exchange = get_exchange(conninfo) + if conninfo.transport.driver_type in self.DISABLED_TRANSPORTS: + self.enabled = False + if self.enabled: + self.enable() + self.headers = {'hostname': self.hostname} + self.pid = os.getpid() + self.warn_if_yajl() + + def warn_if_yajl(self): + import anyjson + if anyjson.implementation.name == 'yajl': + warnings.warn(UserWarning(W_YAJL)) + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + + def enable(self): + self.producer = Producer(self.channel or self.connection, + exchange=self.exchange, + serializer=self.serializer) + self.enabled = True + for callback in self.on_enabled: + callback() + + def disable(self): + if self.enabled: + self.enabled = False + self.close() + for callback in self.on_disabled: + callback() + + def publish(self, type, fields, producer, retry=False, + retry_policy=None, blind=False, utcoffset=utcoffset, + Event=Event): + """Publish event using a custom :class:`~kombu.Producer` + instance. + + :param type: Event type name, with group separated by dash (`-`). + :param fields: Dictionary of event fields, must be json serializable. + :param producer: :class:`~kombu.Producer` instance to use, + only the ``publish`` method will be called. + :keyword retry: Retry in the event of connection failure. + :keyword retry_policy: Dict of custom retry policy, see + :meth:`~kombu.Connection.ensure`. + :keyword blind: Don't set logical clock value (also do not forward + the internal logical clock). + :keyword Event: Event type used to create event, + defaults to :func:`Event`. + :keyword utcoffset: Function returning the current utcoffset in hours. + + """ + + with self.mutex: + clock = None if blind else self.clock.forward() + event = Event(type, hostname=self.hostname, utcoffset=utcoffset(), + pid=self.pid, clock=clock, **fields) + exchange = self.exchange + producer.publish( + event, + routing_key=type.replace('-', '.'), + exchange=exchange.name, + retry=retry, + retry_policy=retry_policy, + declare=[exchange], + serializer=self.serializer, + headers=self.headers, + ) + + def send(self, type, blind=False, **fields): + """Send event. + + :param type: Event type name, with group separated by dash (`-`). + :keyword retry: Retry in the event of connection failure. + :keyword retry_policy: Dict of custom retry policy, see + :meth:`~kombu.Connection.ensure`. + :keyword blind: Don't set logical clock value (also do not forward + the internal logical clock). + :keyword Event: Event type used to create event, + defaults to :func:`Event`. + :keyword utcoffset: Function returning the current utcoffset in hours. 
+ :keyword \*\*fields: Event fields, must be json serializable. + + """ + if self.enabled: + groups = self.groups + if groups and group_from(type) not in groups: + return + try: + self.publish(type, fields, self.producer, blind) + except Exception as exc: + if not self.buffer_while_offline: + raise + self._outbound_buffer.append((type, fields, exc)) + + def flush(self): + """Flushes the outbound buffer.""" + while self._outbound_buffer: + try: + type, fields, _ = self._outbound_buffer.popleft() + except IndexError: + return + self.send(type, **fields) + + def extend_buffer(self, other): + """Copies the outbound buffer of another instance.""" + self._outbound_buffer.extend(other._outbound_buffer) + + def close(self): + """Close the event dispatcher.""" + self.mutex.locked() and self.mutex.release() + self.producer = None + + def _get_publisher(self): + return self.producer + + def _set_publisher(self, producer): + self.producer = producer + publisher = property(_get_publisher, _set_publisher) # XXX compat + + +class EventReceiver(ConsumerMixin): + """Capture events. + + :param connection: Connection to the broker. + :keyword handlers: Event handlers. + + :attr:`handlers` is a dict of event types and their handlers, + the special handler `"*"` captures all events that doesn't have a + handler. + + """ + app = None + + def __init__(self, channel, handlers=None, routing_key='#', + node_id=None, app=None, queue_prefix='celeryev', + accept=None): + self.app = app_or_default(app or self.app) + self.channel = maybe_channel(channel) + self.handlers = {} if handlers is None else handlers + self.routing_key = routing_key + self.node_id = node_id or uuid() + self.queue_prefix = queue_prefix + self.exchange = get_exchange(self.connection or self.app.connection()) + self.queue = Queue('.'.join([self.queue_prefix, self.node_id]), + exchange=self.exchange, + routing_key=self.routing_key, + auto_delete=True, + durable=False, + queue_arguments=self._get_queue_arguments()) + self.clock = self.app.clock + self.adjust_clock = self.clock.adjust + self.forward_clock = self.clock.forward + if accept is None: + accept = set([self.app.conf.CELERY_EVENT_SERIALIZER, 'json']) + self.accept = accept + + def _get_queue_arguments(self): + conf = self.app.conf + return dictfilter({ + 'x-message-ttl': maybe_s_to_ms(conf.CELERY_EVENT_QUEUE_TTL), + 'x-expires': maybe_s_to_ms(conf.CELERY_EVENT_QUEUE_EXPIRES), + }) + + def process(self, type, event): + """Process the received event by dispatching it to the appropriate + handler.""" + handler = self.handlers.get(type) or self.handlers.get('*') + handler and handler(event) + + def get_consumers(self, Consumer, channel): + return [Consumer(queues=[self.queue], + callbacks=[self._receive], no_ack=True, + accept=self.accept)] + + def on_consume_ready(self, connection, channel, consumers, + wakeup=True, **kwargs): + if wakeup: + self.wakeup_workers(channel=channel) + + def itercapture(self, limit=None, timeout=None, wakeup=True): + return self.consume(limit=limit, timeout=timeout, wakeup=wakeup) + + def capture(self, limit=None, timeout=None, wakeup=True): + """Open up a consumer capturing events. + + This has to run in the main process, and it will never stop + unless :attr:`EventDispatcher.should_stop` is set to True, or + forced via :exc:`KeyboardInterrupt` or :exc:`SystemExit`. 
+ + """ + return list(self.consume(limit=limit, timeout=timeout, wakeup=wakeup)) + + def wakeup_workers(self, channel=None): + self.app.control.broadcast('heartbeat', + connection=self.connection, + channel=channel) + + def event_from_message(self, body, localize=True, + now=time.time, tzfields=_TZGETTER, + adjust_timestamp=adjust_timestamp, + CLIENT_CLOCK_SKEW=CLIENT_CLOCK_SKEW): + type = body['type'] + if type == 'task-sent': + # clients never sync so cannot use their clock value + _c = body['clock'] = (self.clock.value or 1) + CLIENT_CLOCK_SKEW + self.adjust_clock(_c) + else: + try: + clock = body['clock'] + except KeyError: + body['clock'] = self.forward_clock() + else: + self.adjust_clock(clock) + + if localize: + try: + offset, timestamp = tzfields(body) + except KeyError: + pass + else: + body['timestamp'] = adjust_timestamp(timestamp, offset) + body['local_received'] = now() + return type, body + + def _receive(self, body, message): + self.process(*self.event_from_message(body)) + + @property + def connection(self): + return self.channel.connection.client if self.channel else None + + +class Events(object): + + def __init__(self, app=None): + self.app = app + + @cached_property + def Receiver(self): + return self.app.subclass_with_self(EventReceiver, + reverse='events.Receiver') + + @cached_property + def Dispatcher(self): + return self.app.subclass_with_self(EventDispatcher, + reverse='events.Dispatcher') + + @cached_property + def State(self): + return self.app.subclass_with_self('celery.events.state:State', + reverse='events.State') + + @contextmanager + def default_dispatcher(self, hostname=None, enabled=True, + buffer_while_offline=False): + with self.app.amqp.producer_pool.acquire(block=True) as prod: + with self.Dispatcher(prod.connection, hostname, enabled, + prod.channel, buffer_while_offline) as d: + yield d diff --git a/thesisenv/lib/python3.6/site-packages/celery/events/cursesmon.py b/thesisenv/lib/python3.6/site-packages/celery/events/cursesmon.py new file mode 100644 index 0000000..775f6a0 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/events/cursesmon.py @@ -0,0 +1,544 @@ +# -*- coding: utf-8 -*- +""" + celery.events.cursesmon + ~~~~~~~~~~~~~~~~~~~~~~~ + + Graphical monitor of Celery events using curses. + +""" +from __future__ import absolute_import, print_function + +import curses +import sys +import threading + +from datetime import datetime +from itertools import count +from textwrap import wrap +from time import time +from math import ceil + +from celery import VERSION_BANNER +from celery import states +from celery.app import app_or_default +from celery.five import items, values +from celery.utils.text import abbr, abbrtask + +__all__ = ['CursesMonitor', 'evtop'] + +BORDER_SPACING = 4 +LEFT_BORDER_OFFSET = 3 +UUID_WIDTH = 36 +STATE_WIDTH = 8 +TIMESTAMP_WIDTH = 8 +MIN_WORKER_WIDTH = 15 +MIN_TASK_WIDTH = 16 + +# this module is considered experimental +# we don't care about coverage. 
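+# Usage sketch (illustrative): the CursesMonitor defined below is normally
+# started through the ``evtop()`` entry point at the end of this file, e.g.
+# from the command line (``proj`` is an assumed project/app module name):
+#
+#     celery -A proj events
+#
+# which creates an ``app.events.State()``, wraps it in CursesMonitor and
+# runs the DisplayThread/capture loop shown in ``evtop()``.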
+ +STATUS_SCREEN = """\ +events: {s.event_count} tasks:{s.task_count} workers:{w_alive}/{w_all} +""" + + +class CursesMonitor(object): # pragma: no cover + keymap = {} + win = None + screen_width = None + screen_delay = 10 + selected_task = None + selected_position = 0 + selected_str = 'Selected: ' + foreground = curses.COLOR_BLACK + background = curses.COLOR_WHITE + online_str = 'Workers online: ' + help_title = 'Keys: ' + help = ('j:down k:up i:info t:traceback r:result c:revoke ^c: quit') + greet = 'celery events {0}'.format(VERSION_BANNER) + info_str = 'Info: ' + + def __init__(self, state, app, keymap=None): + self.app = app + self.keymap = keymap or self.keymap + self.state = state + default_keymap = {'J': self.move_selection_down, + 'K': self.move_selection_up, + 'C': self.revoke_selection, + 'T': self.selection_traceback, + 'R': self.selection_result, + 'I': self.selection_info, + 'L': self.selection_rate_limit} + self.keymap = dict(default_keymap, **self.keymap) + self.lock = threading.RLock() + + def format_row(self, uuid, task, worker, timestamp, state): + mx = self.display_width + + # include spacing + detail_width = mx - 1 - STATE_WIDTH - 1 - TIMESTAMP_WIDTH + uuid_space = detail_width - 1 - MIN_TASK_WIDTH - 1 - MIN_WORKER_WIDTH + + if uuid_space < UUID_WIDTH: + uuid_width = uuid_space + else: + uuid_width = UUID_WIDTH + + detail_width = detail_width - uuid_width - 1 + task_width = int(ceil(detail_width / 2.0)) + worker_width = detail_width - task_width - 1 + + uuid = abbr(uuid, uuid_width).ljust(uuid_width) + worker = abbr(worker, worker_width).ljust(worker_width) + task = abbrtask(task, task_width).ljust(task_width) + state = abbr(state, STATE_WIDTH).ljust(STATE_WIDTH) + timestamp = timestamp.ljust(TIMESTAMP_WIDTH) + + row = '{0} {1} {2} {3} {4} '.format(uuid, worker, task, + timestamp, state) + if self.screen_width is None: + self.screen_width = len(row[:mx]) + return row[:mx] + + @property + def screen_width(self): + _, mx = self.win.getmaxyx() + return mx + + @property + def screen_height(self): + my, _ = self.win.getmaxyx() + return my + + @property + def display_width(self): + _, mx = self.win.getmaxyx() + return mx - BORDER_SPACING + + @property + def display_height(self): + my, _ = self.win.getmaxyx() + return my - 10 + + @property + def limit(self): + return self.display_height + + def find_position(self): + if not self.tasks: + return 0 + for i, e in enumerate(self.tasks): + if self.selected_task == e[0]: + return i + return 0 + + def move_selection_up(self): + self.move_selection(-1) + + def move_selection_down(self): + self.move_selection(1) + + def move_selection(self, direction=1): + if not self.tasks: + return + pos = self.find_position() + try: + self.selected_task = self.tasks[pos + direction][0] + except IndexError: + self.selected_task = self.tasks[0][0] + + keyalias = {curses.KEY_DOWN: 'J', + curses.KEY_UP: 'K', + curses.KEY_ENTER: 'I'} + + def handle_keypress(self): + try: + key = self.win.getkey().upper() + except: + return + key = self.keyalias.get(key) or key + handler = self.keymap.get(key) + if handler is not None: + handler() + + def alert(self, callback, title=None): + self.win.erase() + my, mx = self.win.getmaxyx() + y = blank_line = count(2) + if title: + self.win.addstr(next(y), 3, title, + curses.A_BOLD | curses.A_UNDERLINE) + next(blank_line) + callback(my, mx, next(y)) + self.win.addstr(my - 1, 0, 'Press any key to continue...', + curses.A_BOLD) + self.win.refresh() + while 1: + try: + return self.win.getkey().upper() + except: + pass + + def 
selection_rate_limit(self): + if not self.selected_task: + return curses.beep() + task = self.state.tasks[self.selected_task] + if not task.name: + return curses.beep() + + my, mx = self.win.getmaxyx() + r = 'New rate limit: ' + self.win.addstr(my - 2, 3, r, curses.A_BOLD | curses.A_UNDERLINE) + self.win.addstr(my - 2, len(r) + 3, ' ' * (mx - len(r))) + rlimit = self.readline(my - 2, 3 + len(r)) + + if rlimit: + reply = self.app.control.rate_limit(task.name, + rlimit.strip(), reply=True) + self.alert_remote_control_reply(reply) + + def alert_remote_control_reply(self, reply): + + def callback(my, mx, xs): + y = count(xs) + if not reply: + self.win.addstr( + next(y), 3, 'No replies received in 1s deadline.', + curses.A_BOLD + curses.color_pair(2), + ) + return + + for subreply in reply: + curline = next(y) + + host, response = next(items(subreply)) + host = '{0}: '.format(host) + self.win.addstr(curline, 3, host, curses.A_BOLD) + attr = curses.A_NORMAL + text = '' + if 'error' in response: + text = response['error'] + attr |= curses.color_pair(2) + elif 'ok' in response: + text = response['ok'] + attr |= curses.color_pair(3) + self.win.addstr(curline, 3 + len(host), text, attr) + + return self.alert(callback, 'Remote Control Command Replies') + + def readline(self, x, y): + buffer = str() + curses.echo() + try: + i = 0 + while 1: + ch = self.win.getch(x, y + i) + if ch != -1: + if ch in (10, curses.KEY_ENTER): # enter + break + if ch in (27, ): + buffer = str() + break + buffer += chr(ch) + i += 1 + finally: + curses.noecho() + return buffer + + def revoke_selection(self): + if not self.selected_task: + return curses.beep() + reply = self.app.control.revoke(self.selected_task, reply=True) + self.alert_remote_control_reply(reply) + + def selection_info(self): + if not self.selected_task: + return + + def alert_callback(mx, my, xs): + my, mx = self.win.getmaxyx() + y = count(xs) + task = self.state.tasks[self.selected_task] + info = task.info(extra=['state']) + infoitems = [ + ('args', info.pop('args', None)), + ('kwargs', info.pop('kwargs', None)) + ] + list(info.items()) + for key, value in infoitems: + if key is None: + continue + value = str(value) + curline = next(y) + keys = key + ': ' + self.win.addstr(curline, 3, keys, curses.A_BOLD) + wrapped = wrap(value, mx - 2) + if len(wrapped) == 1: + self.win.addstr( + curline, len(keys) + 3, + abbr(wrapped[0], + self.screen_width - (len(keys) + 3))) + else: + for subline in wrapped: + nexty = next(y) + if nexty >= my - 1: + subline = ' ' * 4 + '[...]' + elif nexty >= my: + break + self.win.addstr( + nexty, 3, + abbr(' ' * 4 + subline, self.screen_width - 4), + curses.A_NORMAL, + ) + + return self.alert( + alert_callback, 'Task details for {0.selected_task}'.format(self), + ) + + def selection_traceback(self): + if not self.selected_task: + return curses.beep() + task = self.state.tasks[self.selected_task] + if task.state not in states.EXCEPTION_STATES: + return curses.beep() + + def alert_callback(my, mx, xs): + y = count(xs) + for line in task.traceback.split('\n'): + self.win.addstr(next(y), 3, line) + + return self.alert( + alert_callback, + 'Task Exception Traceback for {0.selected_task}'.format(self), + ) + + def selection_result(self): + if not self.selected_task: + return + + def alert_callback(my, mx, xs): + y = count(xs) + task = self.state.tasks[self.selected_task] + result = (getattr(task, 'result', None) or + getattr(task, 'exception', None)) + for line in wrap(result or '', mx - 2): + self.win.addstr(next(y), 3, line) + + return 
self.alert( + alert_callback, + 'Task Result for {0.selected_task}'.format(self), + ) + + def display_task_row(self, lineno, task): + state_color = self.state_colors.get(task.state) + attr = curses.A_NORMAL + if task.uuid == self.selected_task: + attr = curses.A_STANDOUT + timestamp = datetime.utcfromtimestamp( + task.timestamp or time(), + ) + timef = timestamp.strftime('%H:%M:%S') + hostname = task.worker.hostname if task.worker else '*NONE*' + line = self.format_row(task.uuid, task.name, + hostname, + timef, task.state) + self.win.addstr(lineno, LEFT_BORDER_OFFSET, line, attr) + + if state_color: + self.win.addstr(lineno, + len(line) - STATE_WIDTH + BORDER_SPACING - 1, + task.state, state_color | attr) + + def draw(self): + with self.lock: + win = self.win + self.handle_keypress() + x = LEFT_BORDER_OFFSET + y = blank_line = count(2) + my, mx = win.getmaxyx() + win.erase() + win.bkgd(' ', curses.color_pair(1)) + win.border() + win.addstr(1, x, self.greet, curses.A_DIM | curses.color_pair(5)) + next(blank_line) + win.addstr(next(y), x, self.format_row('UUID', 'TASK', + 'WORKER', 'TIME', 'STATE'), + curses.A_BOLD | curses.A_UNDERLINE) + tasks = self.tasks + if tasks: + for row, (uuid, task) in enumerate(tasks): + if row > self.display_height: + break + + if task.uuid: + lineno = next(y) + self.display_task_row(lineno, task) + + # -- Footer + next(blank_line) + win.hline(my - 6, x, curses.ACS_HLINE, self.screen_width - 4) + + # Selected Task Info + if self.selected_task: + win.addstr(my - 5, x, self.selected_str, curses.A_BOLD) + info = 'Missing extended info' + detail = '' + try: + selection = self.state.tasks[self.selected_task] + except KeyError: + pass + else: + info = selection.info() + if 'runtime' in info: + info['runtime'] = '{0:.2f}'.format(info['runtime']) + if 'result' in info: + info['result'] = abbr(info['result'], 16) + info = ' '.join( + '{0}={1}'.format(key, value) + for key, value in items(info) + ) + detail = '... 
-> key i' + infowin = abbr(info, + self.screen_width - len(self.selected_str) - 2, + detail) + win.addstr(my - 5, x + len(self.selected_str), infowin) + # Make ellipsis bold + if detail in infowin: + detailpos = len(infowin) - len(detail) + win.addstr(my - 5, x + len(self.selected_str) + detailpos, + detail, curses.A_BOLD) + else: + win.addstr(my - 5, x, 'No task selected', curses.A_NORMAL) + + # Workers + if self.workers: + win.addstr(my - 4, x, self.online_str, curses.A_BOLD) + win.addstr(my - 4, x + len(self.online_str), + ', '.join(sorted(self.workers)), curses.A_NORMAL) + else: + win.addstr(my - 4, x, 'No workers discovered.') + + # Info + win.addstr(my - 3, x, self.info_str, curses.A_BOLD) + win.addstr( + my - 3, x + len(self.info_str), + STATUS_SCREEN.format( + s=self.state, + w_alive=len([w for w in values(self.state.workers) + if w.alive]), + w_all=len(self.state.workers), + ), + curses.A_DIM, + ) + + # Help + self.safe_add_str(my - 2, x, self.help_title, curses.A_BOLD) + self.safe_add_str(my - 2, x + len(self.help_title), self.help, + curses.A_DIM) + win.refresh() + + def safe_add_str(self, y, x, string, *args, **kwargs): + if x + len(string) > self.screen_width: + string = string[:self.screen_width - x] + self.win.addstr(y, x, string, *args, **kwargs) + + def init_screen(self): + with self.lock: + self.win = curses.initscr() + self.win.nodelay(True) + self.win.keypad(True) + curses.start_color() + curses.init_pair(1, self.foreground, self.background) + # exception states + curses.init_pair(2, curses.COLOR_RED, self.background) + # successful state + curses.init_pair(3, curses.COLOR_GREEN, self.background) + # revoked state + curses.init_pair(4, curses.COLOR_MAGENTA, self.background) + # greeting + curses.init_pair(5, curses.COLOR_BLUE, self.background) + # started state + curses.init_pair(6, curses.COLOR_YELLOW, self.foreground) + + self.state_colors = {states.SUCCESS: curses.color_pair(3), + states.REVOKED: curses.color_pair(4), + states.STARTED: curses.color_pair(6)} + for state in states.EXCEPTION_STATES: + self.state_colors[state] = curses.color_pair(2) + + curses.cbreak() + + def resetscreen(self): + with self.lock: + curses.nocbreak() + self.win.keypad(False) + curses.echo() + curses.endwin() + + def nap(self): + curses.napms(self.screen_delay) + + @property + def tasks(self): + return list(self.state.tasks_by_time(limit=self.limit)) + + @property + def workers(self): + return [hostname for hostname, w in items(self.state.workers) + if w.alive] + + +class DisplayThread(threading.Thread): # pragma: no cover + + def __init__(self, display): + self.display = display + self.shutdown = False + threading.Thread.__init__(self) + + def run(self): + while not self.shutdown: + self.display.draw() + self.display.nap() + + +def capture_events(app, state, display): # pragma: no cover + + def on_connection_error(exc, interval): + print('Connection Error: {0!r}. 
Retry in {1}s.'.format( + exc, interval), file=sys.stderr) + + while 1: + print('-> evtop: starting capture...', file=sys.stderr) + with app.connection() as conn: + try: + conn.ensure_connection(on_connection_error, + app.conf.BROKER_CONNECTION_MAX_RETRIES) + recv = app.events.Receiver(conn, handlers={'*': state.event}) + display.resetscreen() + display.init_screen() + recv.capture() + except conn.connection_errors + conn.channel_errors as exc: + print('Connection lost: {0!r}'.format(exc), file=sys.stderr) + + +def evtop(app=None): # pragma: no cover + app = app_or_default(app) + state = app.events.State() + display = CursesMonitor(state, app) + display.init_screen() + refresher = DisplayThread(display) + refresher.start() + try: + capture_events(app, state, display) + except Exception: + refresher.shutdown = True + refresher.join() + display.resetscreen() + raise + except (KeyboardInterrupt, SystemExit): + refresher.shutdown = True + refresher.join() + display.resetscreen() + + +if __name__ == '__main__': # pragma: no cover + evtop() diff --git a/thesisenv/lib/python3.6/site-packages/celery/events/dumper.py b/thesisenv/lib/python3.6/site-packages/celery/events/dumper.py new file mode 100644 index 0000000..323afc4 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/events/dumper.py @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- +""" + celery.events.dumper + ~~~~~~~~~~~~~~~~~~~~ + + This is a simple program that dumps events to the console + as they happen. Think of it like a `tcpdump` for Celery events. + +""" +from __future__ import absolute_import, print_function + +import sys + +from datetime import datetime + +from celery.app import app_or_default +from celery.utils.functional import LRUCache +from celery.utils.timeutils import humanize_seconds + +__all__ = ['Dumper', 'evdump'] + +TASK_NAMES = LRUCache(limit=0xFFF) + +HUMAN_TYPES = {'worker-offline': 'shutdown', + 'worker-online': 'started', + 'worker-heartbeat': 'heartbeat'} + +CONNECTION_ERROR = """\ +-> Cannot connect to %s: %s. +Trying again %s +""" + + +def humanize_type(type): + try: + return HUMAN_TYPES[type.lower()] + except KeyError: + return type.lower().replace('-', ' ') + + +class Dumper(object): + + def __init__(self, out=sys.stdout): + self.out = out + + def say(self, msg): + print(msg, file=self.out) + # need to flush so that output can be piped. 
+ try: + self.out.flush() + except AttributeError: + pass + + def on_event(self, ev): + timestamp = datetime.utcfromtimestamp(ev.pop('timestamp')) + type = ev.pop('type').lower() + hostname = ev.pop('hostname') + if type.startswith('task-'): + uuid = ev.pop('uuid') + if type in ('task-received', 'task-sent'): + task = TASK_NAMES[uuid] = '{0}({1}) args={2} kwargs={3}' \ + .format(ev.pop('name'), uuid, + ev.pop('args'), + ev.pop('kwargs')) + else: + task = TASK_NAMES.get(uuid, '') + return self.format_task_event(hostname, timestamp, + type, task, ev) + fields = ', '.join( + '{0}={1}'.format(key, ev[key]) for key in sorted(ev) + ) + sep = fields and ':' or '' + self.say('{0} [{1}] {2}{3} {4}'.format( + hostname, timestamp, humanize_type(type), sep, fields), + ) + + def format_task_event(self, hostname, timestamp, type, task, event): + fields = ', '.join( + '{0}={1}'.format(key, event[key]) for key in sorted(event) + ) + sep = fields and ':' or '' + self.say('{0} [{1}] {2}{3} {4} {5}'.format( + hostname, timestamp, humanize_type(type), sep, task, fields), + ) + + +def evdump(app=None, out=sys.stdout): + app = app_or_default(app) + dumper = Dumper(out=out) + dumper.say('-> evdump: starting capture...') + conn = app.connection().clone() + + def _error_handler(exc, interval): + dumper.say(CONNECTION_ERROR % ( + conn.as_uri(), exc, humanize_seconds(interval, 'in', ' ') + )) + + while 1: + try: + conn.ensure_connection(_error_handler) + recv = app.events.Receiver(conn, handlers={'*': dumper.on_event}) + recv.capture() + except (KeyboardInterrupt, SystemExit): + return conn and conn.close() + except conn.connection_errors + conn.channel_errors: + dumper.say('-> Connection lost, attempting reconnect') + +if __name__ == '__main__': # pragma: no cover + evdump() diff --git a/thesisenv/lib/python3.6/site-packages/celery/events/snapshot.py b/thesisenv/lib/python3.6/site-packages/celery/events/snapshot.py new file mode 100644 index 0000000..0dd4155 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/events/snapshot.py @@ -0,0 +1,114 @@ +# -*- coding: utf-8 -*- +""" + celery.events.snapshot + ~~~~~~~~~~~~~~~~~~~~~~ + + Consuming the events as a stream is not always suitable + so this module implements a system to take snapshots of the + state of a cluster at regular intervals. There is a full + implementation of this writing the snapshots to a database + in :mod:`djcelery.snapshots` in the `django-celery` distribution. 
+ +""" +from __future__ import absolute_import + +from kombu.utils.limits import TokenBucket + +from celery import platforms +from celery.app import app_or_default +from celery.utils.timer2 import Timer +from celery.utils.dispatch import Signal +from celery.utils.imports import instantiate +from celery.utils.log import get_logger +from celery.utils.timeutils import rate + +__all__ = ['Polaroid', 'evcam'] + +logger = get_logger('celery.evcam') + + +class Polaroid(object): + timer = None + shutter_signal = Signal(providing_args=('state', )) + cleanup_signal = Signal() + clear_after = False + + _tref = None + _ctref = None + + def __init__(self, state, freq=1.0, maxrate=None, + cleanup_freq=3600.0, timer=None, app=None): + self.app = app_or_default(app) + self.state = state + self.freq = freq + self.cleanup_freq = cleanup_freq + self.timer = timer or self.timer or Timer() + self.logger = logger + self.maxrate = maxrate and TokenBucket(rate(maxrate)) + + def install(self): + self._tref = self.timer.call_repeatedly(self.freq, self.capture) + self._ctref = self.timer.call_repeatedly( + self.cleanup_freq, self.cleanup, + ) + + def on_shutter(self, state): + pass + + def on_cleanup(self): + pass + + def cleanup(self): + logger.debug('Cleanup: Running...') + self.cleanup_signal.send(None) + self.on_cleanup() + + def shutter(self): + if self.maxrate is None or self.maxrate.can_consume(): + logger.debug('Shutter: %s', self.state) + self.shutter_signal.send(self.state) + self.on_shutter(self.state) + + def capture(self): + self.state.freeze_while(self.shutter, clear_after=self.clear_after) + + def cancel(self): + if self._tref: + self._tref() # flush all received events. + self._tref.cancel() + if self._ctref: + self._ctref.cancel() + + def __enter__(self): + self.install() + return self + + def __exit__(self, *exc_info): + self.cancel() + + +def evcam(camera, freq=1.0, maxrate=None, loglevel=0, + logfile=None, pidfile=None, timer=None, app=None): + app = app_or_default(app) + + if pidfile: + platforms.create_pidlock(pidfile) + + app.log.setup_logging_subsystem(loglevel, logfile) + + print('-> evcam: Taking snapshots with {0} (every {1} secs.)'.format( + camera, freq)) + state = app.events.State() + cam = instantiate(camera, state, app=app, freq=freq, + maxrate=maxrate, timer=timer) + cam.install() + conn = app.connection() + recv = app.events.Receiver(conn, handlers={'*': state.event}) + try: + try: + recv.capture(limit=None) + except KeyboardInterrupt: + raise SystemExit + finally: + cam.cancel() + conn.close() diff --git a/thesisenv/lib/python3.6/site-packages/celery/events/state.py b/thesisenv/lib/python3.6/site-packages/celery/events/state.py new file mode 100644 index 0000000..c78f2d0 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/events/state.py @@ -0,0 +1,656 @@ +# -*- coding: utf-8 -*- +""" + celery.events.state + ~~~~~~~~~~~~~~~~~~~ + + This module implements a datastructure used to keep + track of the state of a cluster of workers and the tasks + it is working on (by consuming events). + + For every event consumed the state is updated, + so the state represents the state of the cluster + at the time of the last event. + + Snapshots (:mod:`celery.events.snapshot`) can be used to + take "pictures" of this state at regular intervals + to e.g. store that in a database. 
+ +""" +from __future__ import absolute_import + +import bisect +import sys +import threading + +from datetime import datetime +from decimal import Decimal +from itertools import islice +from operator import itemgetter +from time import time +from weakref import ref + +from kombu.clocks import timetuple +from kombu.utils import cached_property, kwdict + +from celery import states +from celery.five import class_property, items, values +from celery.utils import deprecated +from celery.utils.functional import LRUCache, memoize +from celery.utils.log import get_logger + +PYPY = hasattr(sys, 'pypy_version_info') + +# The window (in percentage) is added to the workers heartbeat +# frequency. If the time between updates exceeds this window, +# then the worker is considered to be offline. +HEARTBEAT_EXPIRE_WINDOW = 200 + +# Max drift between event timestamp and time of event received +# before we alert that clocks may be unsynchronized. +HEARTBEAT_DRIFT_MAX = 16 + +DRIFT_WARNING = """\ +Substantial drift from %s may mean clocks are out of sync. Current drift is +%s seconds. [orig: %s recv: %s] +""" + +CAN_KWDICT = sys.version_info >= (2, 6, 5) + +logger = get_logger(__name__) +warn = logger.warning + +R_STATE = '' +R_WORKER = ' HEARTBEAT_DRIFT_MAX: + _warn_drift(self.hostname, drift, + local_received, timestamp) + if local_received: + hearts = len(heartbeats) + if hearts > hbmax - 1: + hb_pop(0) + if hearts and local_received > heartbeats[-1]: + hb_append(local_received) + else: + insort(heartbeats, local_received) + return event + + def update(self, f, **kw): + for k, v in items(dict(f, **kw) if kw else f): + setattr(self, k, v) + + def __repr__(self): + return R_WORKER.format(self) + + @property + def status_string(self): + return 'ONLINE' if self.alive else 'OFFLINE' + + @property + def heartbeat_expires(self): + return heartbeat_expires(self.heartbeats[-1], + self.freq, self.expire_window) + + @property + def alive(self, nowfun=time): + return bool(self.heartbeats and nowfun() < self.heartbeat_expires) + + @property + def id(self): + return '{0.hostname}.{0.pid}'.format(self) + + @deprecated(3.2, 3.3) + def update_heartbeat(self, received, timestamp): + self.event(None, timestamp, received) + + @deprecated(3.2, 3.3) + def on_online(self, timestamp=None, local_received=None, **fields): + self.event('online', timestamp, local_received, fields) + + @deprecated(3.2, 3.3) + def on_offline(self, timestamp=None, local_received=None, **fields): + self.event('offline', timestamp, local_received, fields) + + @deprecated(3.2, 3.3) + def on_heartbeat(self, timestamp=None, local_received=None, **fields): + self.event('heartbeat', timestamp, local_received, fields) + + @class_property + def _defaults(cls): + """Deprecated, to be removed in 3.3""" + source = cls() + return dict((k, getattr(source, k)) for k in cls._fields) + + +@with_unique_field('uuid') +class Task(object): + """Task State.""" + name = received = sent = started = succeeded = failed = retried = \ + revoked = args = kwargs = eta = expires = retries = worker = result = \ + exception = timestamp = runtime = traceback = exchange = \ + routing_key = client = None + state = states.PENDING + clock = 0 + + _fields = ('uuid', 'name', 'state', 'received', 'sent', 'started', + 'succeeded', 'failed', 'retried', 'revoked', 'args', 'kwargs', + 'eta', 'expires', 'retries', 'worker', 'result', 'exception', + 'timestamp', 'runtime', 'traceback', 'exchange', 'routing_key', + 'clock', 'client') + if not PYPY: + __slots__ = ('__dict__', '__weakref__') + + #: 
How to merge out of order events. + #: Disorder is detected by logical ordering (e.g. :event:`task-received` + #: must have happened before a :event:`task-failed` event). + #: + #: A merge rule consists of a state and a list of fields to keep from + #: that state. ``(RECEIVED, ('name', 'args')``, means the name and args + #: fields are always taken from the RECEIVED state, and any values for + #: these fields received before or after is simply ignored. + merge_rules = {states.RECEIVED: ('name', 'args', 'kwargs', + 'retries', 'eta', 'expires')} + + #: meth:`info` displays these fields by default. + _info_fields = ('args', 'kwargs', 'retries', 'result', 'eta', 'runtime', + 'expires', 'exception', 'exchange', 'routing_key') + + def __init__(self, uuid=None, **kwargs): + self.uuid = uuid + if kwargs: + for k, v in items(kwargs): + setattr(self, k, v) + + def event(self, type_, timestamp=None, local_received=None, fields=None, + precedence=states.precedence, items=items, dict=dict, + PENDING=states.PENDING, RECEIVED=states.RECEIVED, + STARTED=states.STARTED, FAILURE=states.FAILURE, + RETRY=states.RETRY, SUCCESS=states.SUCCESS, + REVOKED=states.REVOKED): + fields = fields or {} + if type_ == 'sent': + state, self.sent = PENDING, timestamp + elif type_ == 'received': + state, self.received = RECEIVED, timestamp + elif type_ == 'started': + state, self.started = STARTED, timestamp + elif type_ == 'failed': + state, self.failed = FAILURE, timestamp + elif type_ == 'retried': + state, self.retried = RETRY, timestamp + elif type_ == 'succeeded': + state, self.succeeded = SUCCESS, timestamp + elif type_ == 'revoked': + state, self.revoked = REVOKED, timestamp + else: + state = type_.upper() + + # note that precedence here is reversed + # see implementation in celery.states.state.__lt__ + if state != RETRY and self.state != RETRY and \ + precedence(state) > precedence(self.state): + # this state logically happens-before the current state, so merge. 
+ keep = self.merge_rules.get(state) + if keep is not None: + fields = dict( + (k, v) for k, v in items(fields) if k in keep + ) + for key, value in items(fields): + setattr(self, key, value) + else: + self.state = state + self.timestamp = timestamp + for key, value in items(fields): + setattr(self, key, value) + + def info(self, fields=None, extra=[]): + """Information about this task suitable for on-screen display.""" + fields = self._info_fields if fields is None else fields + + def _keys(): + for key in list(fields) + list(extra): + value = getattr(self, key, None) + if value is not None: + yield key, value + + return dict(_keys()) + + def __repr__(self): + return R_TASK.format(self) + + def as_dict(self): + get = object.__getattribute__ + return dict( + (k, get(self, k)) for k in self._fields + ) + + def __reduce__(self): + return _depickle_task, (self.__class__, self.as_dict()) + + @property + def origin(self): + return self.client if self.worker is None else self.worker.id + + @property + def ready(self): + return self.state in states.READY_STATES + + @deprecated(3.2, 3.3) + def on_sent(self, timestamp=None, **fields): + self.event('sent', timestamp, fields) + + @deprecated(3.2, 3.3) + def on_received(self, timestamp=None, **fields): + self.event('received', timestamp, fields) + + @deprecated(3.2, 3.3) + def on_started(self, timestamp=None, **fields): + self.event('started', timestamp, fields) + + @deprecated(3.2, 3.3) + def on_failed(self, timestamp=None, **fields): + self.event('failed', timestamp, fields) + + @deprecated(3.2, 3.3) + def on_retried(self, timestamp=None, **fields): + self.event('retried', timestamp, fields) + + @deprecated(3.2, 3.3) + def on_succeeded(self, timestamp=None, **fields): + self.event('succeeded', timestamp, fields) + + @deprecated(3.2, 3.3) + def on_revoked(self, timestamp=None, **fields): + self.event('revoked', timestamp, fields) + + @deprecated(3.2, 3.3) + def on_unknown_event(self, shortype, timestamp=None, **fields): + self.event(shortype, timestamp, fields) + + @deprecated(3.2, 3.3) + def update(self, state, timestamp, fields, + _state=states.state, RETRY=states.RETRY): + return self.event(state, timestamp, None, fields) + + @deprecated(3.2, 3.3) + def merge(self, state, timestamp, fields): + keep = self.merge_rules.get(state) + if keep is not None: + fields = dict((k, v) for k, v in items(fields) if k in keep) + for key, value in items(fields): + setattr(self, key, value) + + @class_property + def _defaults(cls): + """Deprecated, to be removed in 3.3.""" + source = cls() + return dict((k, getattr(source, k)) for k in source._fields) + + +class State(object): + """Records clusters state.""" + Worker = Worker + Task = Task + event_count = 0 + task_count = 0 + heap_multiplier = 4 + + def __init__(self, callback=None, + workers=None, tasks=None, taskheap=None, + max_workers_in_memory=5000, max_tasks_in_memory=10000, + on_node_join=None, on_node_leave=None): + self.event_callback = callback + self.workers = (LRUCache(max_workers_in_memory) + if workers is None else workers) + self.tasks = (LRUCache(max_tasks_in_memory) + if tasks is None else tasks) + self._taskheap = [] if taskheap is None else taskheap + self.max_workers_in_memory = max_workers_in_memory + self.max_tasks_in_memory = max_tasks_in_memory + self.on_node_join = on_node_join + self.on_node_leave = on_node_leave + self._mutex = threading.Lock() + self.handlers = {} + self._seen_types = set() + self.rebuild_taskheap() + + @cached_property + def _event(self): + return 
self._create_dispatcher() + + def freeze_while(self, fun, *args, **kwargs): + clear_after = kwargs.pop('clear_after', False) + with self._mutex: + try: + return fun(*args, **kwargs) + finally: + if clear_after: + self._clear() + + def clear_tasks(self, ready=True): + with self._mutex: + return self._clear_tasks(ready) + + def _clear_tasks(self, ready=True): + if ready: + in_progress = dict( + (uuid, task) for uuid, task in self.itertasks() + if task.state not in states.READY_STATES) + self.tasks.clear() + self.tasks.update(in_progress) + else: + self.tasks.clear() + self._taskheap[:] = [] + + def _clear(self, ready=True): + self.workers.clear() + self._clear_tasks(ready) + self.event_count = 0 + self.task_count = 0 + + def clear(self, ready=True): + with self._mutex: + return self._clear(ready) + + def get_or_create_worker(self, hostname, **kwargs): + """Get or create worker by hostname. + + Return tuple of ``(worker, was_created)``. + """ + try: + worker = self.workers[hostname] + if kwargs: + worker.update(kwargs) + return worker, False + except KeyError: + worker = self.workers[hostname] = self.Worker( + hostname, **kwargs) + return worker, True + + def get_or_create_task(self, uuid): + """Get or create task by uuid.""" + try: + return self.tasks[uuid], False + except KeyError: + task = self.tasks[uuid] = self.Task(uuid) + return task, True + + def event(self, event): + with self._mutex: + return self._event(event) + + def task_event(self, type_, fields): + """Deprecated, use :meth:`event`.""" + return self._event(dict(fields, type='-'.join(['task', type_])))[0] + + def worker_event(self, type_, fields): + """Deprecated, use :meth:`event`.""" + return self._event(dict(fields, type='-'.join(['worker', type_])))[0] + + def _create_dispatcher(self): + get_handler = self.handlers.__getitem__ + event_callback = self.event_callback + wfields = itemgetter('hostname', 'timestamp', 'local_received') + tfields = itemgetter('uuid', 'hostname', 'timestamp', + 'local_received', 'clock') + taskheap = self._taskheap + th_append = taskheap.append + th_pop = taskheap.pop + # Removing events from task heap is an O(n) operation, + # so easier to just account for the common number of events + # for each task (PENDING->RECEIVED->STARTED->final) + #: an O(n) operation + max_events_in_heap = self.max_tasks_in_memory * self.heap_multiplier + add_type = self._seen_types.add + on_node_join, on_node_leave = self.on_node_join, self.on_node_leave + tasks, Task = self.tasks, self.Task + workers, Worker = self.workers, self.Worker + # avoid updating LRU entry at getitem + get_worker, get_task = workers.data.__getitem__, tasks.data.__getitem__ + + def _event(event, + timetuple=timetuple, KeyError=KeyError, + insort=bisect.insort, created=True): + self.event_count += 1 + if event_callback: + event_callback(self, event) + group, _, subject = event['type'].partition('-') + try: + handler = get_handler(group) + except KeyError: + pass + else: + return handler(subject, event), subject + + if group == 'worker': + try: + hostname, timestamp, local_received = wfields(event) + except KeyError: + pass + else: + is_offline = subject == 'offline' + try: + worker, created = get_worker(hostname), False + except KeyError: + if is_offline: + worker, created = Worker(hostname), False + else: + worker = workers[hostname] = Worker(hostname) + worker.event(subject, timestamp, local_received, event) + if on_node_join and (created or subject == 'online'): + on_node_join(worker) + if on_node_leave and is_offline: + on_node_leave(worker) + 
workers.pop(hostname, None) + return (worker, created), subject + elif group == 'task': + (uuid, hostname, timestamp, + local_received, clock) = tfields(event) + # task-sent event is sent by client, not worker + is_client_event = subject == 'sent' + try: + task, created = get_task(uuid), False + except KeyError: + task = tasks[uuid] = Task(uuid) + if is_client_event: + task.client = hostname + else: + try: + worker, created = get_worker(hostname), False + except KeyError: + worker = workers[hostname] = Worker(hostname) + task.worker = worker + if worker is not None and local_received: + worker.event(None, local_received, timestamp) + + origin = hostname if is_client_event else worker.id + + # remove oldest event if exceeding the limit. + heaps = len(taskheap) + if heaps + 1 > max_events_in_heap: + th_pop(0) + + # most events will be dated later than the previous. + timetup = timetuple(clock, timestamp, origin, ref(task)) + if heaps and timetup > taskheap[-1]: + th_append(timetup) + else: + insort(taskheap, timetup) + + if subject == 'received': + self.task_count += 1 + task.event(subject, timestamp, local_received, event) + task_name = task.name + if task_name is not None: + add_type(task_name) + return (task, created), subject + return _event + + def rebuild_taskheap(self, timetuple=timetuple): + heap = self._taskheap[:] = [ + timetuple(t.clock, t.timestamp, t.origin, ref(t)) + for t in values(self.tasks) + ] + heap.sort() + + def itertasks(self, limit=None): + for index, row in enumerate(items(self.tasks)): + yield row + if limit and index + 1 >= limit: + break + + def tasks_by_time(self, limit=None): + """Generator giving tasks ordered by time, + in ``(uuid, Task)`` tuples.""" + seen = set() + for evtup in islice(reversed(self._taskheap), 0, limit): + task = evtup[3]() + if task is not None: + uuid = task.uuid + if uuid not in seen: + yield uuid, task + seen.add(uuid) + tasks_by_timestamp = tasks_by_time + + def tasks_by_type(self, name, limit=None): + """Get all tasks by type. + + Return a list of ``(uuid, Task)`` tuples. + + """ + return islice( + ((uuid, task) for uuid, task in self.tasks_by_time() + if task.name == name), + 0, limit, + ) + + def tasks_by_worker(self, hostname, limit=None): + """Get all tasks by worker. + + """ + return islice( + ((uuid, task) for uuid, task in self.tasks_by_time() + if task.worker.hostname == hostname), + 0, limit, + ) + + def task_types(self): + """Return a list of all seen task types.""" + return sorted(self._seen_types) + + def alive_workers(self): + """Return a list of (seemingly) alive workers.""" + return [w for w in values(self.workers) if w.alive] + + def __repr__(self): + return R_STATE.format(self) + + def __reduce__(self): + return self.__class__, ( + self.event_callback, self.workers, self.tasks, None, + self.max_workers_in_memory, self.max_tasks_in_memory, + self.on_node_join, self.on_node_leave, + ) diff --git a/thesisenv/lib/python3.6/site-packages/celery/exceptions.py b/thesisenv/lib/python3.6/site-packages/celery/exceptions.py new file mode 100644 index 0000000..ab65019 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/exceptions.py @@ -0,0 +1,171 @@ +# -*- coding: utf-8 -*- +""" + celery.exceptions + ~~~~~~~~~~~~~~~~~ + + This module contains all exceptions used by the Celery API. 
+ +""" +from __future__ import absolute_import + +import numbers + +from .five import string_t + +from billiard.exceptions import ( # noqa + SoftTimeLimitExceeded, TimeLimitExceeded, WorkerLostError, Terminated, +) + +__all__ = ['SecurityError', 'Ignore', 'QueueNotFound', + 'WorkerShutdown', 'WorkerTerminate', + 'ImproperlyConfigured', 'NotRegistered', 'AlreadyRegistered', + 'TimeoutError', 'MaxRetriesExceededError', 'Retry', + 'TaskRevokedError', 'NotConfigured', 'AlwaysEagerIgnored', + 'InvalidTaskError', 'ChordError', 'CPendingDeprecationWarning', + 'CDeprecationWarning', 'FixupWarning', 'DuplicateNodenameWarning', + 'SoftTimeLimitExceeded', 'TimeLimitExceeded', 'WorkerLostError', + 'Terminated'] + +UNREGISTERED_FMT = """\ +Task of kind {0} is not registered, please make sure it's imported.\ +""" + + +class SecurityError(Exception): + """Security related exceptions. + + Handle with care. + + """ + + +class Ignore(Exception): + """A task can raise this to ignore doing state updates.""" + + +class Reject(Exception): + """A task can raise this if it wants to reject/requeue the message.""" + + def __init__(self, reason=None, requeue=False): + self.reason = reason + self.requeue = requeue + super(Reject, self).__init__(reason, requeue) + + def __repr__(self): + return 'reject requeue=%s: %s' % (self.requeue, self.reason) + + +class WorkerTerminate(SystemExit): + """Signals that the worker should terminate immediately.""" +SystemTerminate = WorkerTerminate # XXX compat + + +class WorkerShutdown(SystemExit): + """Signals that the worker should perform a warm shutdown.""" + + +class QueueNotFound(KeyError): + """Task routed to a queue not in CELERY_QUEUES.""" + + +class ImproperlyConfigured(ImportError): + """Celery is somehow improperly configured.""" + + +class NotRegistered(KeyError): + """The task is not registered.""" + + def __repr__(self): + return UNREGISTERED_FMT.format(self) + + +class AlreadyRegistered(Exception): + """The task is already registered.""" + + +class TimeoutError(Exception): + """The operation timed out.""" + + +class MaxRetriesExceededError(Exception): + """The tasks max restart limit has been exceeded.""" + + +class Retry(Exception): + """The task is to be retried later.""" + + #: Optional message describing context of retry. + message = None + + #: Exception (if any) that caused the retry to happen. + exc = None + + #: Time of retry (ETA), either :class:`numbers.Real` or + #: :class:`~datetime.datetime`. 
+ when = None + + def __init__(self, message=None, exc=None, when=None, **kwargs): + from kombu.utils.encoding import safe_repr + self.message = message + if isinstance(exc, string_t): + self.exc, self.excs = None, exc + else: + self.exc, self.excs = exc, safe_repr(exc) if exc else None + self.when = when + Exception.__init__(self, exc, when, **kwargs) + + def humanize(self): + if isinstance(self.when, numbers.Real): + return 'in {0.when}s'.format(self) + return 'at {0.when}'.format(self) + + def __str__(self): + if self.message: + return self.message + if self.excs: + return 'Retry {0}: {1}'.format(self.humanize(), self.excs) + return 'Retry {0}'.format(self.humanize()) + + def __reduce__(self): + return self.__class__, (self.message, self.excs, self.when) +RetryTaskError = Retry # XXX compat + + +class TaskRevokedError(Exception): + """The task has been revoked, so no result available.""" + + +class NotConfigured(UserWarning): + """Celery has not been configured, as no config module has been found.""" + + +class AlwaysEagerIgnored(UserWarning): + """send_task ignores CELERY_ALWAYS_EAGER option""" + + +class InvalidTaskError(Exception): + """The task has invalid data or is not properly constructed.""" + + +class IncompleteStream(Exception): + """Found the end of a stream of data, but the data is not yet complete.""" + + +class ChordError(Exception): + """A task part of the chord raised an exception.""" + + +class CPendingDeprecationWarning(PendingDeprecationWarning): + pass + + +class CDeprecationWarning(DeprecationWarning): + pass + + +class FixupWarning(UserWarning): + pass + + +class DuplicateNodenameWarning(UserWarning): + """Multiple workers are using the same nodename.""" diff --git a/thesisenv/lib/python3.6/site-packages/celery/five.py b/thesisenv/lib/python3.6/site-packages/celery/five.py new file mode 100644 index 0000000..2406920 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/five.py @@ -0,0 +1,392 @@ +# -*- coding: utf-8 -*- +""" + celery.five + ~~~~~~~~~~~ + + Compatibility implementations of features + only available in newer Python versions. 
+ + +""" +from __future__ import absolute_import + +import io +import operator +import sys + +from importlib import import_module +from types import ModuleType + +from kombu.five import monotonic + +try: + from collections import Counter +except ImportError: # pragma: no cover + from collections import defaultdict + + def Counter(): # noqa + return defaultdict(int) + +__all__ = ['Counter', 'reload', 'UserList', 'UserDict', 'Queue', 'Empty', + 'zip_longest', 'map', 'string', 'string_t', + 'long_t', 'text_t', 'range', 'int_types', 'items', 'keys', 'values', + 'nextfun', 'reraise', 'WhateverIO', 'with_metaclass', + 'OrderedDict', 'THREAD_TIMEOUT_MAX', 'format_d', + 'class_property', 'reclassmethod', 'create_module', + 'recreate_module', 'monotonic'] + +# ############# py3k ######################################################### +PY3 = sys.version_info[0] == 3 + +try: + reload = reload # noqa +except NameError: # pragma: no cover + from imp import reload # noqa + +try: + from UserList import UserList # noqa +except ImportError: # pragma: no cover + from collections import UserList # noqa + +try: + from UserDict import UserDict # noqa +except ImportError: # pragma: no cover + from collections import UserDict # noqa + + +if PY3: # pragma: no cover + import builtins + + from queue import Queue, Empty + from itertools import zip_longest + + map = map + string = str + string_t = str + long_t = int + text_t = str + range = range + int_types = (int, ) + _byte_t = bytes + + open_fqdn = 'builtins.open' + + def items(d): + return d.items() + + def keys(d): + return d.keys() + + def values(d): + return d.values() + + def nextfun(it): + return it.__next__ + + exec_ = getattr(builtins, 'exec') + + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + +else: + import __builtin__ as builtins # noqa + from Queue import Queue, Empty # noqa + from itertools import imap as map, izip_longest as zip_longest # noqa + string = unicode # noqa + string_t = basestring # noqa + text_t = unicode # noqa + long_t = long # noqa + range = xrange # noqa + int_types = (int, long) # noqa + _byte_t = (str, bytes) # noqa + + open_fqdn = '__builtin__.open' + + def items(d): # noqa + return d.iteritems() + + def keys(d): # noqa + return d.iterkeys() + + def values(d): # noqa + return d.itervalues() + + def nextfun(it): # noqa + return it.next + + def exec_(code, globs=None, locs=None): # pragma: no cover + """Execute code in a namespace.""" + if globs is None: + frame = sys._getframe(1) + globs = frame.f_globals + if locs is None: + locs = frame.f_locals + del frame + elif locs is None: + locs = globs + exec("""exec code in globs, locs""") + + exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""") + + +def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])): + """Class decorator to set metaclass. + + Works with both Python 2 and Python 3 and it does not add + an extra class in the lookup order like ``six.with_metaclass`` does + (that is -- it copies the original class instead of using inheritance). 
+ + """ + + def _clone_with_metaclass(Class): + attrs = dict((key, value) for key, value in items(vars(Class)) + if key not in skip_attrs) + return Type(Class.__name__, Class.__bases__, attrs) + + return _clone_with_metaclass + + +# ############# collections.OrderedDict ###################################### +# was moved to kombu +from kombu.utils.compat import OrderedDict # noqa + +# ############# threading.TIMEOUT_MAX ######################################## +try: + from threading import TIMEOUT_MAX as THREAD_TIMEOUT_MAX +except ImportError: + THREAD_TIMEOUT_MAX = 1e10 # noqa + +# ############# format(int, ',d') ############################################ + +if sys.version_info >= (2, 7): # pragma: no cover + def format_d(i): + return format(i, ',d') +else: # pragma: no cover + def format_d(i): # noqa + s = '%d' % i + groups = [] + while s and s[-1].isdigit(): + groups.append(s[-3:]) + s = s[:-3] + return s + ','.join(reversed(groups)) + + +# ############# Module Generation ############################################ + +# Utilities to dynamically +# recreate modules, either for lazy loading or +# to create old modules at runtime instead of +# having them litter the source tree. + +# import fails in python 2.5. fallback to reduce in stdlib +try: + from functools import reduce +except ImportError: + pass + +MODULE_DEPRECATED = """ +The module %s is deprecated and will be removed in a future version. +""" + +DEFAULT_ATTRS = set(['__file__', '__path__', '__doc__', '__all__']) + +# im_func is no longer available in Py3. +# instead the unbound method itself can be used. +if sys.version_info[0] == 3: # pragma: no cover + def fun_of_method(method): + return method +else: + def fun_of_method(method): # noqa + return method.im_func + + +def getappattr(path): + """Gets attribute from the current_app recursively, + e.g. 
getappattr('amqp.get_task_consumer')``.""" + from celery import current_app + return current_app._rgetattr(path) + + +def _compat_task_decorator(*args, **kwargs): + from celery import current_app + kwargs.setdefault('accept_magic_kwargs', True) + return current_app.task(*args, **kwargs) + + +def _compat_periodic_task_decorator(*args, **kwargs): + from celery.task import periodic_task + kwargs.setdefault('accept_magic_kwargs', True) + return periodic_task(*args, **kwargs) + + +COMPAT_MODULES = { + 'celery': { + 'execute': { + 'send_task': 'send_task', + }, + 'decorators': { + 'task': _compat_task_decorator, + 'periodic_task': _compat_periodic_task_decorator, + }, + 'log': { + 'get_default_logger': 'log.get_default_logger', + 'setup_logger': 'log.setup_logger', + 'setup_logging_subsystem': 'log.setup_logging_subsystem', + 'redirect_stdouts_to_logger': 'log.redirect_stdouts_to_logger', + }, + 'messaging': { + 'TaskPublisher': 'amqp.TaskPublisher', + 'TaskConsumer': 'amqp.TaskConsumer', + 'establish_connection': 'connection', + 'get_consumer_set': 'amqp.TaskConsumer', + }, + 'registry': { + 'tasks': 'tasks', + }, + }, + 'celery.task': { + 'control': { + 'broadcast': 'control.broadcast', + 'rate_limit': 'control.rate_limit', + 'time_limit': 'control.time_limit', + 'ping': 'control.ping', + 'revoke': 'control.revoke', + 'discard_all': 'control.purge', + 'inspect': 'control.inspect', + }, + 'schedules': 'celery.schedules', + 'chords': 'celery.canvas', + } +} + + +class class_property(object): + + def __init__(self, getter=None, setter=None): + if getter is not None and not isinstance(getter, classmethod): + getter = classmethod(getter) + if setter is not None and not isinstance(setter, classmethod): + setter = classmethod(setter) + self.__get = getter + self.__set = setter + + info = getter.__get__(object) # just need the info attrs. 
+ self.__doc__ = info.__doc__ + self.__name__ = info.__name__ + self.__module__ = info.__module__ + + def __get__(self, obj, type=None): + if obj and type is None: + type = obj.__class__ + return self.__get.__get__(obj, type)() + + def __set__(self, obj, value): + if obj is None: + return self + return self.__set.__get__(obj)(value) + + def setter(self, setter): + return self.__class__(self.__get, setter) + + +def reclassmethod(method): + return classmethod(fun_of_method(method)) + + +class LazyModule(ModuleType): + _compat_modules = () + _all_by_module = {} + _direct = {} + _object_origins = {} + + def __getattr__(self, name): + if name in self._object_origins: + module = __import__(self._object_origins[name], None, None, [name]) + for item in self._all_by_module[module.__name__]: + setattr(self, item, getattr(module, item)) + return getattr(module, name) + elif name in self._direct: # pragma: no cover + module = __import__(self._direct[name], None, None, [name]) + setattr(self, name, module) + return module + return ModuleType.__getattribute__(self, name) + + def __dir__(self): + return list(set(self.__all__) | DEFAULT_ATTRS) + + def __reduce__(self): + return import_module, (self.__name__, ) + + +def create_module(name, attrs, cls_attrs=None, pkg=None, + base=LazyModule, prepare_attr=None): + fqdn = '.'.join([pkg.__name__, name]) if pkg else name + cls_attrs = {} if cls_attrs is None else cls_attrs + pkg, _, modname = name.rpartition('.') + cls_attrs['__module__'] = pkg + + attrs = dict((attr_name, prepare_attr(attr) if prepare_attr else attr) + for attr_name, attr in items(attrs)) + module = sys.modules[fqdn] = type(modname, (base, ), cls_attrs)(fqdn) + module.__dict__.update(attrs) + return module + + +def recreate_module(name, compat_modules=(), by_module={}, direct={}, + base=LazyModule, **attrs): + old_module = sys.modules[name] + origins = get_origins(by_module) + compat_modules = COMPAT_MODULES.get(name, ()) + + cattrs = dict( + _compat_modules=compat_modules, + _all_by_module=by_module, _direct=direct, + _object_origins=origins, + __all__=tuple(set(reduce( + operator.add, + [tuple(v) for v in [compat_modules, origins, direct, attrs]], + ))), + ) + new_module = create_module(name, attrs, cls_attrs=cattrs, base=base) + new_module.__dict__.update(dict((mod, get_compat_module(new_module, mod)) + for mod in compat_modules)) + return old_module, new_module + + +def get_compat_module(pkg, name): + from .local import Proxy + + def prepare(attr): + if isinstance(attr, string_t): + return Proxy(getappattr, (attr, )) + return attr + + attrs = COMPAT_MODULES[pkg.__name__][name] + if isinstance(attrs, string_t): + fqdn = '.'.join([pkg.__name__, name]) + module = sys.modules[fqdn] = import_module(attrs) + return module + attrs['__all__'] = list(attrs) + return create_module(name, dict(attrs), pkg=pkg, prepare_attr=prepare) + + +def get_origins(defs): + origins = {} + for module, attrs in items(defs): + origins.update(dict((attr, module) for attr in attrs)) + return origins + + +_SIO_write = io.StringIO.write +_SIO_init = io.StringIO.__init__ + + +class WhateverIO(io.StringIO): + + def __init__(self, v=None, *a, **kw): + _SIO_init(self, v.decode() if isinstance(v, _byte_t) else v, *a, **kw) + + def write(self, data): + _SIO_write(self, data.decode() if isinstance(data, _byte_t) else data) diff --git a/thesisenv/lib/python3.6/site-packages/celery/fixups/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/fixups/__init__.py new file mode 100644 index 0000000..e69de29 diff --git 
a/thesisenv/lib/python3.6/site-packages/celery/fixups/django.py b/thesisenv/lib/python3.6/site-packages/celery/fixups/django.py new file mode 100644 index 0000000..73c5c28 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/fixups/django.py @@ -0,0 +1,266 @@ +from __future__ import absolute_import + +import os +import sys +import warnings + +from kombu.utils import cached_property, symbol_by_name + +from datetime import datetime +from importlib import import_module + +from celery import signals +from celery.exceptions import FixupWarning + +if sys.version_info[0] < 3 and not hasattr(sys, 'pypy_version_info'): + from StringIO import StringIO +else: + from io import StringIO + +__all__ = ['DjangoFixup', 'fixup'] + +ERR_NOT_INSTALLED = """\ +Environment variable DJANGO_SETTINGS_MODULE is defined +but Django is not installed. Will not apply Django fixups! +""" + + +def _maybe_close_fd(fh): + try: + os.close(fh.fileno()) + except (AttributeError, OSError, TypeError): + # TypeError added for celery#962 + pass + + +def fixup(app, env='DJANGO_SETTINGS_MODULE'): + SETTINGS_MODULE = os.environ.get(env) + if SETTINGS_MODULE and 'django' not in app.loader_cls.lower(): + try: + import django # noqa + except ImportError: + warnings.warn(FixupWarning(ERR_NOT_INSTALLED)) + else: + return DjangoFixup(app).install() + + +class DjangoFixup(object): + + def __init__(self, app): + self.app = app + self.app.set_default() + self._worker_fixup = None + + def install(self): + # Need to add project directory to path + sys.path.append(os.getcwd()) + + self.app.loader.now = self.now + self.app.loader.mail_admins = self.mail_admins + + signals.import_modules.connect(self.on_import_modules) + signals.worker_init.connect(self.on_worker_init) + return self + + @cached_property + def worker_fixup(self): + if self._worker_fixup is None: + self._worker_fixup = DjangoWorkerFixup(self.app) + return self._worker_fixup + + def on_import_modules(self, **kwargs): + # call django.setup() before task modules are imported + self.worker_fixup.validate_models() + + def on_worker_init(self, **kwargs): + self.worker_fixup.install() + + def now(self, utc=False): + return datetime.utcnow() if utc else self._now() + + def mail_admins(self, subject, body, fail_silently=False, **kwargs): + return self._mail_admins(subject, body, fail_silently=fail_silently) + + @cached_property + def _mail_admins(self): + return symbol_by_name('django.core.mail:mail_admins') + + @cached_property + def _now(self): + try: + return symbol_by_name('django.utils.timezone:now') + except (AttributeError, ImportError): # pre django-1.4 + return datetime.now + + +class DjangoWorkerFixup(object): + _db_recycles = 0 + + def __init__(self, app): + self.app = app + self.db_reuse_max = self.app.conf.get('CELERY_DB_REUSE_MAX', None) + self._db = import_module('django.db') + self._cache = import_module('django.core.cache') + self._settings = symbol_by_name('django.conf:settings') + + # Database-related exceptions. 
+ DatabaseError = symbol_by_name('django.db:DatabaseError') + try: + import MySQLdb as mysql + _my_database_errors = (mysql.DatabaseError, + mysql.InterfaceError, + mysql.OperationalError) + except ImportError: + _my_database_errors = () # noqa + try: + import psycopg2 as pg + _pg_database_errors = (pg.DatabaseError, + pg.InterfaceError, + pg.OperationalError) + except ImportError: + _pg_database_errors = () # noqa + try: + import sqlite3 + _lite_database_errors = (sqlite3.DatabaseError, + sqlite3.InterfaceError, + sqlite3.OperationalError) + except ImportError: + _lite_database_errors = () # noqa + try: + import cx_Oracle as oracle + _oracle_database_errors = (oracle.DatabaseError, + oracle.InterfaceError, + oracle.OperationalError) + except ImportError: + _oracle_database_errors = () # noqa + + try: + self._close_old_connections = symbol_by_name( + 'django.db:close_old_connections', + ) + except (ImportError, AttributeError): + self._close_old_connections = None + self.database_errors = ( + (DatabaseError, ) + + _my_database_errors + + _pg_database_errors + + _lite_database_errors + + _oracle_database_errors + ) + + def validate_models(self): + import django + try: + django_setup = django.setup + except AttributeError: + pass + else: + django_setup() + s = StringIO() + try: + from django.core.management.validation import get_validation_errors + except ImportError: + from django.core.management.base import BaseCommand + cmd = BaseCommand() + try: + # since django 1.5 + from django.core.management.base import OutputWrapper + cmd.stdout = OutputWrapper(sys.stdout) + cmd.stderr = OutputWrapper(sys.stderr) + except ImportError: + cmd.stdout, cmd.stderr = sys.stdout, sys.stderr + + cmd.check() + else: + num_errors = get_validation_errors(s, None) + if num_errors: + raise RuntimeError( + 'One or more Django models did not validate:\n{0}'.format( + s.getvalue())) + + def install(self): + signals.beat_embedded_init.connect(self.close_database) + signals.worker_ready.connect(self.on_worker_ready) + signals.task_prerun.connect(self.on_task_prerun) + signals.task_postrun.connect(self.on_task_postrun) + signals.worker_process_init.connect(self.on_worker_process_init) + self.close_database() + self.close_cache() + return self + + def on_worker_process_init(self, **kwargs): + # Child process must validate models again if on Windows, + # or if they were started using execv. + if os.environ.get('FORKED_BY_MULTIPROCESSING'): + self.validate_models() + + # close connections: + # the parent process may have established these, + # so need to close them. + + # calling db.close() on some DB connections will cause + # the inherited DB conn to also get broken in the parent + # process so we need to remove it without triggering any + # network IO that close() might cause. 
+ try: + for c in self._db.connections.all(): + if c and c.connection: + _maybe_close_fd(c.connection) + except AttributeError: + if self._db.connection and self._db.connection.connection: + _maybe_close_fd(self._db.connection.connection) + + # use the _ version to avoid DB_REUSE preventing the conn.close() call + self._close_database() + self.close_cache() + + def on_task_prerun(self, sender, **kwargs): + """Called before every task.""" + if not getattr(sender.request, 'is_eager', False): + self.close_database() + + def on_task_postrun(self, sender, **kwargs): + # See http://groups.google.com/group/django-users/ + # browse_thread/thread/78200863d0c07c6d/ + if not getattr(sender.request, 'is_eager', False): + self.close_database() + self.close_cache() + + def close_database(self, **kwargs): + if self._close_old_connections: + return self._close_old_connections() # Django 1.6 + if not self.db_reuse_max: + return self._close_database() + if self._db_recycles >= self.db_reuse_max * 2: + self._db_recycles = 0 + self._close_database() + self._db_recycles += 1 + + def _close_database(self): + try: + funs = [conn.close for conn in self._db.connections.all()] + except AttributeError: + if hasattr(self._db, 'close_old_connections'): # django 1.6 + funs = [self._db.close_old_connections] + else: + # pre multidb, pending deprication in django 1.6 + funs = [self._db.close_connection] + + for close in funs: + try: + close() + except self.database_errors as exc: + str_exc = str(exc) + if 'closed' not in str_exc and 'not connected' not in str_exc: + raise + + def close_cache(self): + try: + self._cache.cache.close() + except (TypeError, AttributeError): + pass + + def on_worker_ready(self, **kwargs): + if self._settings.DEBUG: + warnings.warn('Using settings.DEBUG leads to a memory leak, never ' + 'use this setting in production environments!') diff --git a/thesisenv/lib/python3.6/site-packages/celery/loaders/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/loaders/__init__.py new file mode 100644 index 0000000..2a39ba2 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/loaders/__init__.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +""" + celery.loaders + ~~~~~~~~~~~~~~ + + Loaders define how configuration is read, what happens + when workers start, when tasks are executed and so on. + +""" +from __future__ import absolute_import + +from celery._state import current_app +from celery.utils import deprecated +from celery.utils.imports import symbol_by_name, import_from_cwd + +__all__ = ['get_loader_cls'] + +LOADER_ALIASES = {'app': 'celery.loaders.app:AppLoader', + 'default': 'celery.loaders.default:Loader', + 'django': 'djcelery.loaders:DjangoLoader'} + + +def get_loader_cls(loader): + """Get loader class by name/alias""" + return symbol_by_name(loader, LOADER_ALIASES, imp=import_from_cwd) + + +@deprecated(deprecation=2.5, removal=4.0, + alternative='celery.current_app.loader') +def current_loader(): + return current_app.loader + + +@deprecated(deprecation=2.5, removal=4.0, + alternative='celery.current_app.conf') +def load_settings(): + return current_app.conf diff --git a/thesisenv/lib/python3.6/site-packages/celery/loaders/app.py b/thesisenv/lib/python3.6/site-packages/celery/loaders/app.py new file mode 100644 index 0000000..87f034b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/loaders/app.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- +""" + celery.loaders.app + ~~~~~~~~~~~~~~~~~~ + + The default loader used with custom app instances. 
+ +""" +from __future__ import absolute_import + +from .base import BaseLoader + +__all__ = ['AppLoader'] + + +class AppLoader(BaseLoader): + pass diff --git a/thesisenv/lib/python3.6/site-packages/celery/loaders/base.py b/thesisenv/lib/python3.6/site-packages/celery/loaders/base.py new file mode 100644 index 0000000..401be7b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/loaders/base.py @@ -0,0 +1,299 @@ +# -*- coding: utf-8 -*- +""" + celery.loaders.base + ~~~~~~~~~~~~~~~~~~~ + + Loader base class. + +""" +from __future__ import absolute_import + +import anyjson +import imp as _imp +import importlib +import os +import re +import sys + +from datetime import datetime + +from kombu.utils import cached_property +from kombu.utils.encoding import safe_str + +from celery import signals +from celery.datastructures import DictAttribute, force_mapping +from celery.five import reraise, string_t +from celery.utils.functional import maybe_list +from celery.utils.imports import ( + import_from_cwd, symbol_by_name, NotAPackage, find_module, +) + +__all__ = ['BaseLoader'] + +_RACE_PROTECTION = False +CONFIG_INVALID_NAME = """\ +Error: Module '{module}' doesn't exist, or it's not a valid \ +Python module name. +""" + +CONFIG_WITH_SUFFIX = CONFIG_INVALID_NAME + """\ +Did you mean '{suggest}'? +""" + + +class BaseLoader(object): + """The base class for loaders. + + Loaders handles, + + * Reading celery client/worker configurations. + + * What happens when a task starts? + See :meth:`on_task_init`. + + * What happens when the worker starts? + See :meth:`on_worker_init`. + + * What happens when the worker shuts down? + See :meth:`on_worker_shutdown`. + + * What modules are imported to find tasks? + + """ + builtin_modules = frozenset() + configured = False + override_backends = {} + worker_initialized = False + + _conf = None + + def __init__(self, app, **kwargs): + self.app = app + self.task_modules = set() + + def now(self, utc=True): + if utc: + return datetime.utcnow() + return datetime.now() + + def on_task_init(self, task_id, task): + """This method is called before a task is executed.""" + pass + + def on_process_cleanup(self): + """This method is called after a task is executed.""" + pass + + def on_worker_init(self): + """This method is called when the worker (:program:`celery worker`) + starts.""" + pass + + def on_worker_shutdown(self): + """This method is called when the worker (:program:`celery worker`) + shuts down.""" + pass + + def on_worker_process_init(self): + """This method is called when a child process starts.""" + pass + + def import_task_module(self, module): + self.task_modules.add(module) + return self.import_from_cwd(module) + + def import_module(self, module, package=None): + return importlib.import_module(module, package=package) + + def import_from_cwd(self, module, imp=None, package=None): + return import_from_cwd( + module, + self.import_module if imp is None else imp, + package=package, + ) + + def import_default_modules(self): + signals.import_modules.send(sender=self.app) + return [ + self.import_task_module(m) for m in ( + tuple(self.builtin_modules) + + tuple(maybe_list(self.app.conf.CELERY_IMPORTS)) + + tuple(maybe_list(self.app.conf.CELERY_INCLUDE)) + ) + ] + + def init_worker(self): + if not self.worker_initialized: + self.worker_initialized = True + self.import_default_modules() + self.on_worker_init() + + def shutdown_worker(self): + self.on_worker_shutdown() + + def init_worker_process(self): + self.on_worker_process_init() + + def 
config_from_object(self, obj, silent=False): + if isinstance(obj, string_t): + try: + obj = self._smart_import(obj, imp=self.import_from_cwd) + except (ImportError, AttributeError): + if silent: + return False + raise + self._conf = force_mapping(obj) + return True + + def _smart_import(self, path, imp=None): + imp = self.import_module if imp is None else imp + if ':' in path: + # Path includes attribute so can just jump here. + # e.g. ``os.path:abspath``. + return symbol_by_name(path, imp=imp) + + # Not sure if path is just a module name or if it includes an + # attribute name (e.g. ``os.path``, vs, ``os.path.abspath``). + try: + return imp(path) + except ImportError: + # Not a module name, so try module + attribute. + return symbol_by_name(path, imp=imp) + + def _import_config_module(self, name): + try: + self.find_module(name) + except NotAPackage: + if name.endswith('.py'): + reraise(NotAPackage, NotAPackage(CONFIG_WITH_SUFFIX.format( + module=name, suggest=name[:-3])), sys.exc_info()[2]) + reraise(NotAPackage, NotAPackage(CONFIG_INVALID_NAME.format( + module=name)), sys.exc_info()[2]) + else: + return self.import_from_cwd(name) + + def find_module(self, module): + return find_module(module) + + def cmdline_config_parser( + self, args, namespace='celery', + re_type=re.compile(r'\((\w+)\)'), + extra_types={'json': anyjson.loads}, + override_types={'tuple': 'json', + 'list': 'json', + 'dict': 'json'}): + from celery.app.defaults import Option, NAMESPACES + namespace = namespace.upper() + typemap = dict(Option.typemap, **extra_types) + + def getarg(arg): + """Parse a single configuration definition from + the command-line.""" + + # ## find key/value + # ns.key=value|ns_key=value (case insensitive) + key, value = arg.split('=', 1) + key = key.upper().replace('.', '_') + + # ## find namespace. + # .key=value|_key=value expands to default namespace. + if key[0] == '_': + ns, key = namespace, key[1:] + else: + # find namespace part of key + ns, key = key.split('_', 1) + + ns_key = (ns and ns + '_' or '') + key + + # (type)value makes cast to custom type. + cast = re_type.match(value) + if cast: + type_ = cast.groups()[0] + type_ = override_types.get(type_, type_) + value = value[len(cast.group()):] + value = typemap[type_](value) + else: + try: + value = NAMESPACES[ns][key].to_python(value) + except ValueError as exc: + # display key name in error message. 
+ raise ValueError('{0!r}: {1}'.format(ns_key, exc)) + return ns_key, value + return dict(getarg(arg) for arg in args) + + def mail_admins(self, subject, body, fail_silently=False, + sender=None, to=None, host=None, port=None, + user=None, password=None, timeout=None, + use_ssl=False, use_tls=False, charset='utf-8'): + message = self.mail.Message(sender=sender, to=to, + subject=safe_str(subject), + body=safe_str(body), + charset=charset) + mailer = self.mail.Mailer(host=host, port=port, + user=user, password=password, + timeout=timeout, use_ssl=use_ssl, + use_tls=use_tls) + mailer.send(message, fail_silently=fail_silently) + + def read_configuration(self, env='CELERY_CONFIG_MODULE'): + try: + custom_config = os.environ[env] + except KeyError: + pass + else: + if custom_config: + usercfg = self._import_config_module(custom_config) + return DictAttribute(usercfg) + return {} + + def autodiscover_tasks(self, packages, related_name='tasks'): + self.task_modules.update( + mod.__name__ for mod in autodiscover_tasks(packages or (), + related_name) if mod) + + @property + def conf(self): + """Loader configuration.""" + if self._conf is None: + self._conf = self.read_configuration() + return self._conf + + @cached_property + def mail(self): + return self.import_module('celery.utils.mail') + + +def autodiscover_tasks(packages, related_name='tasks'): + global _RACE_PROTECTION + + if _RACE_PROTECTION: + return () + _RACE_PROTECTION = True + try: + return [find_related_module(pkg, related_name) for pkg in packages] + finally: + _RACE_PROTECTION = False + + +def find_related_module(package, related_name): + """Given a package name and a module name, tries to find that + module.""" + + # Django 1.7 allows for speciying a class name in INSTALLED_APPS. + # (Issue #2248). + try: + importlib.import_module(package) + except ImportError: + package, _, _ = package.rpartition('.') + + try: + pkg_path = importlib.import_module(package).__path__ + except AttributeError: + return + + try: + _imp.find_module(related_name, pkg_path) + except ImportError: + return + + return importlib.import_module('{0}.{1}'.format(package, related_name)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/loaders/default.py b/thesisenv/lib/python3.6/site-packages/celery/loaders/default.py new file mode 100644 index 0000000..6071480 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/loaders/default.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +""" + celery.loaders.default + ~~~~~~~~~~~~~~~~~~~~~~ + + The default loader used when no custom app has been initialized. + +""" +from __future__ import absolute_import + +import os +import warnings + +from celery.datastructures import DictAttribute +from celery.exceptions import NotConfigured +from celery.utils import strtobool + +from .base import BaseLoader + +__all__ = ['Loader', 'DEFAULT_CONFIG_MODULE'] + +DEFAULT_CONFIG_MODULE = 'celeryconfig' + +#: Warns if configuration file is missing if :envvar:`C_WNOCONF` is set. 
+C_WNOCONF = strtobool(os.environ.get('C_WNOCONF', False)) + + +class Loader(BaseLoader): + """The loader used by the default app.""" + + def setup_settings(self, settingsdict): + return DictAttribute(settingsdict) + + def read_configuration(self, fail_silently=True): + """Read configuration from :file:`celeryconfig.py` and configure + celery and Django so it can be used by regular Python.""" + configname = os.environ.get('CELERY_CONFIG_MODULE', + DEFAULT_CONFIG_MODULE) + try: + usercfg = self._import_config_module(configname) + except ImportError: + if not fail_silently: + raise + # billiard sets this if forked using execv + if C_WNOCONF and not os.environ.get('FORKED_BY_MULTIPROCESSING'): + warnings.warn(NotConfigured( + 'No {module} module found! Please make sure it exists and ' + 'is available to Python.'.format(module=configname))) + return self.setup_settings({}) + else: + self.configured = True + return self.setup_settings(usercfg) diff --git a/thesisenv/lib/python3.6/site-packages/celery/local.py b/thesisenv/lib/python3.6/site-packages/celery/local.py new file mode 100644 index 0000000..50da8bc --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/local.py @@ -0,0 +1,373 @@ +# -*- coding: utf-8 -*- +""" + celery.local + ~~~~~~~~~~~~ + + This module contains critical utilities that + needs to be loaded as soon as possible, and that + shall not load any third party modules. + + Parts of this module is Copyright by Werkzeug Team. + +""" +from __future__ import absolute_import + +import importlib +import sys + +from .five import string + +__all__ = ['Proxy', 'PromiseProxy', 'try_import', 'maybe_evaluate'] + +__module__ = __name__ # used by Proxy class body + +PY3 = sys.version_info[0] == 3 + + +def _default_cls_attr(name, type_, cls_value): + # Proxy uses properties to forward the standard + # class attributes __module__, __name__ and __doc__ to the real + # object, but these needs to be a string when accessed from + # the Proxy class directly. This is a hack to make that work. + # -- See Issue #1087. + + def __new__(cls, getter): + instance = type_.__new__(cls, cls_value) + instance.__getter = getter + return instance + + def __get__(self, obj, cls=None): + return self.__getter(obj) if obj is not None else self + + return type(name, (type_, ), { + '__new__': __new__, '__get__': __get__, + }) + + +def try_import(module, default=None): + """Try to import and return module, or return + None if the module does not exist.""" + try: + return importlib.import_module(module) + except ImportError: + return default + + +class Proxy(object): + """Proxy to another object.""" + + # Code stolen from werkzeug.local.Proxy. 
+ __slots__ = ('__local', '__args', '__kwargs', '__dict__') + + def __init__(self, local, + args=None, kwargs=None, name=None, __doc__=None): + object.__setattr__(self, '_Proxy__local', local) + object.__setattr__(self, '_Proxy__args', args or ()) + object.__setattr__(self, '_Proxy__kwargs', kwargs or {}) + if name is not None: + object.__setattr__(self, '__custom_name__', name) + if __doc__ is not None: + object.__setattr__(self, '__doc__', __doc__) + + @_default_cls_attr('name', str, __name__) + def __name__(self): + try: + return self.__custom_name__ + except AttributeError: + return self._get_current_object().__name__ + + @_default_cls_attr('module', str, __module__) + def __module__(self): + return self._get_current_object().__module__ + + @_default_cls_attr('doc', str, __doc__) + def __doc__(self): + return self._get_current_object().__doc__ + + def _get_class(self): + return self._get_current_object().__class__ + + @property + def __class__(self): + return self._get_class() + + def _get_current_object(self): + """Return the current object. This is useful if you want the real + object behind the proxy at a time for performance reasons or because + you want to pass the object into a different context. + """ + loc = object.__getattribute__(self, '_Proxy__local') + if not hasattr(loc, '__release_local__'): + return loc(*self.__args, **self.__kwargs) + try: + return getattr(loc, self.__name__) + except AttributeError: + raise RuntimeError('no object bound to {0.__name__}'.format(self)) + + @property + def __dict__(self): + try: + return self._get_current_object().__dict__ + except RuntimeError: # pragma: no cover + raise AttributeError('__dict__') + + def __repr__(self): + try: + obj = self._get_current_object() + except RuntimeError: # pragma: no cover + return '<{0} unbound>'.format(self.__class__.__name__) + return repr(obj) + + def __bool__(self): + try: + return bool(self._get_current_object()) + except RuntimeError: # pragma: no cover + return False + __nonzero__ = __bool__ # Py2 + + def __unicode__(self): + try: + return string(self._get_current_object()) + except RuntimeError: # pragma: no cover + return repr(self) + + def __dir__(self): + try: + return dir(self._get_current_object()) + except RuntimeError: # pragma: no cover + return [] + + def __getattr__(self, name): + if name == '__members__': + return dir(self._get_current_object()) + return getattr(self._get_current_object(), name) + + def __setitem__(self, key, value): + self._get_current_object()[key] = value + + def __delitem__(self, key): + del self._get_current_object()[key] + + def __setslice__(self, i, j, seq): + self._get_current_object()[i:j] = seq + + def __delslice__(self, i, j): + del self._get_current_object()[i:j] + + def __setattr__(self, name, value): + setattr(self._get_current_object(), name, value) + + def __delattr__(self, name): + delattr(self._get_current_object(), name) + + def __str__(self): + return str(self._get_current_object()) + + def __lt__(self, other): + return self._get_current_object() < other + + def __le__(self, other): + return self._get_current_object() <= other + + def __eq__(self, other): + return self._get_current_object() == other + + def __ne__(self, other): + return self._get_current_object() != other + + def __gt__(self, other): + return self._get_current_object() > other + + def __ge__(self, other): + return self._get_current_object() >= other + + def __hash__(self): + return hash(self._get_current_object()) + + def __call__(self, *a, **kw): + return 
self._get_current_object()(*a, **kw) + + def __len__(self): + return len(self._get_current_object()) + + def __getitem__(self, i): + return self._get_current_object()[i] + + def __iter__(self): + return iter(self._get_current_object()) + + def __contains__(self, i): + return i in self._get_current_object() + + def __getslice__(self, i, j): + return self._get_current_object()[i:j] + + def __add__(self, other): + return self._get_current_object() + other + + def __sub__(self, other): + return self._get_current_object() - other + + def __mul__(self, other): + return self._get_current_object() * other + + def __floordiv__(self, other): + return self._get_current_object() // other + + def __mod__(self, other): + return self._get_current_object() % other + + def __divmod__(self, other): + return self._get_current_object().__divmod__(other) + + def __pow__(self, other): + return self._get_current_object() ** other + + def __lshift__(self, other): + return self._get_current_object() << other + + def __rshift__(self, other): + return self._get_current_object() >> other + + def __and__(self, other): + return self._get_current_object() & other + + def __xor__(self, other): + return self._get_current_object() ^ other + + def __or__(self, other): + return self._get_current_object() | other + + def __div__(self, other): + return self._get_current_object().__div__(other) + + def __truediv__(self, other): + return self._get_current_object().__truediv__(other) + + def __neg__(self): + return -(self._get_current_object()) + + def __pos__(self): + return +(self._get_current_object()) + + def __abs__(self): + return abs(self._get_current_object()) + + def __invert__(self): + return ~(self._get_current_object()) + + def __complex__(self): + return complex(self._get_current_object()) + + def __int__(self): + return int(self._get_current_object()) + + def __float__(self): + return float(self._get_current_object()) + + def __oct__(self): + return oct(self._get_current_object()) + + def __hex__(self): + return hex(self._get_current_object()) + + def __index__(self): + return self._get_current_object().__index__() + + def __coerce__(self, other): + return self._get_current_object().__coerce__(other) + + def __enter__(self): + return self._get_current_object().__enter__() + + def __exit__(self, *a, **kw): + return self._get_current_object().__exit__(*a, **kw) + + def __reduce__(self): + return self._get_current_object().__reduce__() + + if not PY3: + def __cmp__(self, other): + return cmp(self._get_current_object(), other) # noqa + + def __long__(self): + return long(self._get_current_object()) # noqa + + +class PromiseProxy(Proxy): + """This is a proxy to an object that has not yet been evaulated. + + :class:`Proxy` will evaluate the object each time, while the + promise will only evaluate it once. 
+ + """ + + __slots__ = ('__pending__', ) + + def _get_current_object(self): + try: + return object.__getattribute__(self, '__thing') + except AttributeError: + return self.__evaluate__() + + def __then__(self, fun, *args, **kwargs): + if self.__evaluated__(): + return fun(*args, **kwargs) + from collections import deque + try: + pending = object.__getattribute__(self, '__pending__') + except AttributeError: + pending = None + if pending is None: + pending = deque() + object.__setattr__(self, '__pending__', pending) + pending.append((fun, args, kwargs)) + + def __evaluated__(self): + try: + object.__getattribute__(self, '__thing') + except AttributeError: + return False + return True + + def __maybe_evaluate__(self): + return self._get_current_object() + + def __evaluate__(self, + _clean=('_Proxy__local', + '_Proxy__args', + '_Proxy__kwargs')): + try: + thing = Proxy._get_current_object(self) + except: + raise + else: + object.__setattr__(self, '__thing', thing) + for attr in _clean: + try: + object.__delattr__(self, attr) + except AttributeError: # pragma: no cover + # May mask errors so ignore + pass + try: + pending = object.__getattribute__(self, '__pending__') + except AttributeError: + pass + else: + try: + while pending: + fun, args, kwargs = pending.popleft() + fun(*args, **kwargs) + finally: + try: + object.__delattr__(self, '__pending__') + except AttributeError: + pass + return thing + + +def maybe_evaluate(obj): + try: + return obj.__maybe_evaluate__() + except AttributeError: + return obj diff --git a/thesisenv/lib/python3.6/site-packages/celery/platforms.py b/thesisenv/lib/python3.6/site-packages/celery/platforms.py new file mode 100644 index 0000000..b0242d5 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/platforms.py @@ -0,0 +1,813 @@ +# -*- coding: utf-8 -*- +""" + celery.platforms + ~~~~~~~~~~~~~~~~ + + Utilities dealing with platform specifics: signals, daemonization, + users, groups, and so on. 
+ +""" +from __future__ import absolute_import, print_function + +import atexit +import errno +import math +import numbers +import os +import platform as _platform +import signal as _signal +import sys +import warnings + +from collections import namedtuple + +from billiard import current_process +# fileno used to be in this module +from kombu.utils import maybe_fileno +from kombu.utils.compat import get_errno +from kombu.utils.encoding import safe_str +from contextlib import contextmanager + +from .local import try_import +from .five import items, range, reraise, string_t, zip_longest +from .utils.functional import uniq + +_setproctitle = try_import('setproctitle') +resource = try_import('resource') +pwd = try_import('pwd') +grp = try_import('grp') +mputil = try_import('multiprocessing.util') + +__all__ = ['EX_OK', 'EX_FAILURE', 'EX_UNAVAILABLE', 'EX_USAGE', 'SYSTEM', + 'IS_OSX', 'IS_WINDOWS', 'pyimplementation', 'LockFailed', + 'get_fdmax', 'Pidfile', 'create_pidlock', + 'close_open_fds', 'DaemonContext', 'detached', 'parse_uid', + 'parse_gid', 'setgroups', 'initgroups', 'setgid', 'setuid', + 'maybe_drop_privileges', 'signals', 'set_process_title', + 'set_mp_process_title', 'get_errno_name', 'ignore_errno', + 'fd_by_path'] + +# exitcodes +EX_OK = getattr(os, 'EX_OK', 0) +EX_FAILURE = 1 +EX_UNAVAILABLE = getattr(os, 'EX_UNAVAILABLE', 69) +EX_USAGE = getattr(os, 'EX_USAGE', 64) +EX_CANTCREAT = getattr(os, 'EX_CANTCREAT', 73) + +SYSTEM = _platform.system() +IS_OSX = SYSTEM == 'Darwin' +IS_WINDOWS = SYSTEM == 'Windows' + +DAEMON_WORKDIR = '/' + +PIDFILE_FLAGS = os.O_CREAT | os.O_EXCL | os.O_WRONLY +PIDFILE_MODE = ((os.R_OK | os.W_OK) << 6) | ((os.R_OK) << 3) | ((os.R_OK)) + +PIDLOCKED = """ERROR: Pidfile ({0}) already exists. +Seems we're already running? (pid: {1})""" + +_range = namedtuple('_range', ('start', 'stop')) + +C_FORCE_ROOT = os.environ.get('C_FORCE_ROOT', False) + +ROOT_DISALLOWED = """\ +Running a worker with superuser privileges when the +worker accepts messages serialized with pickle is a very bad idea! + +If you really want to continue then you have to set the C_FORCE_ROOT +environment variable (but please think about this before you do). + +User information: uid={uid} euid={euid} gid={gid} egid={egid} +""" + +ROOT_DISCOURAGED = """\ +You are running the worker with superuser privileges, which is +absolutely not recommended! + +Please specify a different user using the -u option. + +User information: uid={uid} euid={euid} gid={gid} egid={egid} +""" + + +def pyimplementation(): + """Return string identifying the current Python implementation.""" + if hasattr(_platform, 'python_implementation'): + return _platform.python_implementation() + elif sys.platform.startswith('java'): + return 'Jython ' + sys.platform + elif hasattr(sys, 'pypy_version_info'): + v = '.'.join(str(p) for p in sys.pypy_version_info[:3]) + if sys.pypy_version_info[3:]: + v += '-' + ''.join(str(p) for p in sys.pypy_version_info[3:]) + return 'PyPy ' + v + else: + return 'CPython' + + +class LockFailed(Exception): + """Raised if a pidlock can't be acquired.""" + + +def get_fdmax(default=None): + """Return the maximum number of open file descriptors + on this system. + + :keyword default: Value returned if there's no file + descriptor limit. 
+ + """ + try: + return os.sysconf('SC_OPEN_MAX') + except: + pass + if resource is None: # Windows + return default + fdmax = resource.getrlimit(resource.RLIMIT_NOFILE)[1] + if fdmax == resource.RLIM_INFINITY: + return default + return fdmax + + +class Pidfile(object): + """Pidfile + + This is the type returned by :func:`create_pidlock`. + + TIP: Use the :func:`create_pidlock` function instead, + which is more convenient and also removes stale pidfiles (when + the process holding the lock is no longer running). + + """ + + #: Path to the pid lock file. + path = None + + def __init__(self, path): + self.path = os.path.abspath(path) + + def acquire(self): + """Acquire lock.""" + try: + self.write_pid() + except OSError as exc: + reraise(LockFailed, LockFailed(str(exc)), sys.exc_info()[2]) + return self + __enter__ = acquire + + def is_locked(self): + """Return true if the pid lock exists.""" + return os.path.exists(self.path) + + def release(self, *args): + """Release lock.""" + self.remove() + __exit__ = release + + def read_pid(self): + """Read and return the current pid.""" + with ignore_errno('ENOENT'): + with open(self.path, 'r') as fh: + line = fh.readline() + if line.strip() == line: # must contain '\n' + raise ValueError( + 'Partial or invalid pidfile {0.path}'.format(self)) + + try: + return int(line.strip()) + except ValueError: + raise ValueError( + 'pidfile {0.path} contents invalid.'.format(self)) + + def remove(self): + """Remove the lock.""" + with ignore_errno(errno.ENOENT, errno.EACCES): + os.unlink(self.path) + + def remove_if_stale(self): + """Remove the lock if the process is not running. + (does not respond to signals).""" + try: + pid = self.read_pid() + except ValueError as exc: + print('Broken pidfile found. Removing it.', file=sys.stderr) + self.remove() + return True + if not pid: + self.remove() + return True + + try: + os.kill(pid, 0) + except os.error as exc: + if exc.errno == errno.ESRCH: + print('Stale pidfile exists. Removing it.', file=sys.stderr) + self.remove() + return True + return False + + def write_pid(self): + pid = os.getpid() + content = '{0}\n'.format(pid) + + pidfile_fd = os.open(self.path, PIDFILE_FLAGS, PIDFILE_MODE) + pidfile = os.fdopen(pidfile_fd, 'w') + try: + pidfile.write(content) + # flush and sync so that the re-read below works. + pidfile.flush() + try: + os.fsync(pidfile_fd) + except AttributeError: # pragma: no cover + pass + finally: + pidfile.close() + + rfh = open(self.path) + try: + if rfh.read() != content: + raise LockFailed( + "Inconsistency: Pidfile content doesn't match at re-read") + finally: + rfh.close() +PIDFile = Pidfile # compat alias + + +def create_pidlock(pidfile): + """Create and verify pidfile. + + If the pidfile already exists the program exits with an error message, + however if the process it refers to is not running anymore, the pidfile + is deleted and the program continues. + + This function will automatically install an :mod:`atexit` handler + to release the lock at exit, you can skip this by calling + :func:`_create_pidlock` instead. + + :returns: :class:`Pidfile`. + + **Example**: + + .. 
code-block:: python + + pidlock = create_pidlock('/var/run/app.pid') + + """ + pidlock = _create_pidlock(pidfile) + atexit.register(pidlock.release) + return pidlock + + +def _create_pidlock(pidfile): + pidlock = Pidfile(pidfile) + if pidlock.is_locked() and not pidlock.remove_if_stale(): + print(PIDLOCKED.format(pidfile, pidlock.read_pid()), file=sys.stderr) + raise SystemExit(EX_CANTCREAT) + pidlock.acquire() + return pidlock + + +def fd_by_path(paths): + """Return a list of fds. + + This method returns list of fds corresponding to + file paths passed in paths variable. + + :keyword paths: List of file paths go get fd for. + + :returns: :list:. + + **Example**: + + .. code-block:: python + + keep = fd_by_path(['/dev/urandom', + '/my/precious/']) + """ + stats = set() + for path in paths: + try: + fd = os.open(path, os.O_RDONLY) + except OSError: + continue + try: + stats.add(os.fstat(fd)[1:3]) + finally: + os.close(fd) + + def fd_in_stats(fd): + try: + return os.fstat(fd)[1:3] in stats + except OSError: + return False + + return [_fd for _fd in range(get_fdmax(2048)) if fd_in_stats(_fd)] + + +if hasattr(os, 'closerange'): + + def close_open_fds(keep=None): + # must make sure this is 0-inclusive (Issue #1882) + keep = list(uniq(sorted( + f for f in map(maybe_fileno, keep or []) if f is not None + ))) + maxfd = get_fdmax(default=2048) + kL, kH = iter([-1] + keep), iter(keep + [maxfd]) + for low, high in zip_longest(kL, kH): + if low + 1 != high: + os.closerange(low + 1, high) + +else: + + def close_open_fds(keep=None): # noqa + keep = [maybe_fileno(f) + for f in (keep or []) if maybe_fileno(f) is not None] + for fd in reversed(range(get_fdmax(default=2048))): + if fd not in keep: + with ignore_errno(errno.EBADF): + os.close(fd) + + +class DaemonContext(object): + _is_open = False + + def __init__(self, pidfile=None, workdir=None, umask=None, + fake=False, after_chdir=None, after_forkers=True, + **kwargs): + if isinstance(umask, string_t): + # octal or decimal, depending on initial zero. + umask = int(umask, 8 if umask.startswith('0') else 10) + self.workdir = workdir or DAEMON_WORKDIR + self.umask = umask + self.fake = fake + self.after_chdir = after_chdir + self.after_forkers = after_forkers + self.stdfds = (sys.stdin, sys.stdout, sys.stderr) + + def redirect_to_null(self, fd): + if fd is not None: + dest = os.open(os.devnull, os.O_RDWR) + os.dup2(dest, fd) + + def open(self): + if not self._is_open: + if not self.fake: + self._detach() + + os.chdir(self.workdir) + if self.umask is not None: + os.umask(self.umask) + + if self.after_chdir: + self.after_chdir() + + if not self.fake: + # We need to keep /dev/urandom from closing because + # shelve needs it, and Beat needs shelve to start. + keep = list(self.stdfds) + fd_by_path(['/dev/urandom']) + close_open_fds(keep) + for fd in self.stdfds: + self.redirect_to_null(maybe_fileno(fd)) + if self.after_forkers and mputil is not None: + mputil._run_after_forkers() + + self._is_open = True + __enter__ = open + + def close(self, *args): + if self._is_open: + self._is_open = False + __exit__ = close + + def _detach(self): + if os.fork() == 0: # first child + os.setsid() # create new session + if os.fork() > 0: # second child + os._exit(0) + else: + os._exit(0) + return self + + +def detached(logfile=None, pidfile=None, uid=None, gid=None, umask=0, + workdir=None, fake=False, **opts): + """Detach the current process in the background (daemonize). + + :keyword logfile: Optional log file. 
The ability to write to this file + will be verified before the process is detached. + :keyword pidfile: Optional pidfile. The pidfile will not be created, + as this is the responsibility of the child. But the process will + exit if the pid lock exists and the pid written is still running. + :keyword uid: Optional user id or user name to change + effective privileges to. + :keyword gid: Optional group id or group name to change effective + privileges to. + :keyword umask: Optional umask that will be effective in the child process. + :keyword workdir: Optional new working directory. + :keyword fake: Don't actually detach, intented for debugging purposes. + :keyword \*\*opts: Ignored. + + **Example**: + + .. code-block:: python + + from celery.platforms import detached, create_pidlock + + with detached(logfile='/var/log/app.log', pidfile='/var/run/app.pid', + uid='nobody'): + # Now in detached child process with effective user set to nobody, + # and we know that our logfile can be written to, and that + # the pidfile is not locked. + pidlock = create_pidlock('/var/run/app.pid') + + # Run the program + program.run(logfile='/var/log/app.log') + + """ + + if not resource: + raise RuntimeError('This platform does not support detach.') + workdir = os.getcwd() if workdir is None else workdir + + signals.reset('SIGCLD') # Make sure SIGCLD is using the default handler. + maybe_drop_privileges(uid=uid, gid=gid) + + def after_chdir_do(): + # Since without stderr any errors will be silently suppressed, + # we need to know that we have access to the logfile. + logfile and open(logfile, 'a').close() + # Doesn't actually create the pidfile, but makes sure it's not stale. + if pidfile: + _create_pidlock(pidfile).release() + + return DaemonContext( + umask=umask, workdir=workdir, fake=fake, after_chdir=after_chdir_do, + ) + + +def parse_uid(uid): + """Parse user id. + + uid can be an integer (uid) or a string (user name), if a user name + the uid is taken from the system user registry. + + """ + try: + return int(uid) + except ValueError: + try: + return pwd.getpwnam(uid).pw_uid + except (AttributeError, KeyError): + raise KeyError('User does not exist: {0}'.format(uid)) + + +def parse_gid(gid): + """Parse group id. + + gid can be an integer (gid) or a string (group name), if a group name + the gid is taken from the system group registry. + + """ + try: + return int(gid) + except ValueError: + try: + return grp.getgrnam(gid).gr_gid + except (AttributeError, KeyError): + raise KeyError('Group does not exist: {0}'.format(gid)) + + +def _setgroups_hack(groups): + """:fun:`setgroups` may have a platform-dependent limit, + and it is not always possible to know in advance what this limit + is, so we use this ugly hack stolen from glibc.""" + groups = groups[:] + + while 1: + try: + return os.setgroups(groups) + except ValueError: # error from Python's check. + if len(groups) <= 1: + raise + groups[:] = groups[:-1] + except OSError as exc: # error from the OS. + if exc.errno != errno.EINVAL or len(groups) <= 1: + raise + groups[:] = groups[:-1] + + +def setgroups(groups): + """Set active groups from a list of group ids.""" + max_groups = None + try: + max_groups = os.sysconf('SC_NGROUPS_MAX') + except Exception: + pass + try: + return _setgroups_hack(groups[:max_groups]) + except OSError as exc: + if exc.errno != errno.EPERM: + raise + if any(group not in groups for group in os.getgroups()): + # we shouldn't be allowed to change to this group. 
+ raise + + +def initgroups(uid, gid): + """Compat version of :func:`os.initgroups` which was first + added to Python 2.7.""" + if not pwd: # pragma: no cover + return + username = pwd.getpwuid(uid)[0] + if hasattr(os, 'initgroups'): # Python 2.7+ + return os.initgroups(username, gid) + groups = [gr.gr_gid for gr in grp.getgrall() + if username in gr.gr_mem] + setgroups(groups) + + +def setgid(gid): + """Version of :func:`os.setgid` supporting group names.""" + os.setgid(parse_gid(gid)) + + +def setuid(uid): + """Version of :func:`os.setuid` supporting usernames.""" + os.setuid(parse_uid(uid)) + + +def maybe_drop_privileges(uid=None, gid=None): + """Change process privileges to new user/group. + + If UID and GID is specified, the real user/group is changed. + + If only UID is specified, the real user is changed, and the group is + changed to the users primary group. + + If only GID is specified, only the group is changed. + + """ + if sys.platform == 'win32': + return + if os.geteuid(): + # no point trying to setuid unless we're root. + if not os.getuid(): + raise AssertionError('contact support') + uid = uid and parse_uid(uid) + gid = gid and parse_gid(gid) + + if uid: + # If GID isn't defined, get the primary GID of the user. + if not gid and pwd: + gid = pwd.getpwuid(uid).pw_gid + # Must set the GID before initgroups(), as setgid() + # is known to zap the group list on some platforms. + + # setgid must happen before setuid (otherwise the setgid operation + # may fail because of insufficient privileges and possibly stay + # in a privileged group). + setgid(gid) + initgroups(uid, gid) + + # at last: + setuid(uid) + # ... and make sure privileges cannot be restored: + try: + setuid(0) + except OSError as exc: + if get_errno(exc) != errno.EPERM: + raise + pass # Good: cannot restore privileges. + else: + raise RuntimeError( + 'non-root user able to restore privileges after setuid.') + else: + gid and setgid(gid) + + if uid and (not os.getuid()) and not (os.geteuid()): + raise AssertionError('Still root uid after drop privileges!') + if gid and (not os.getgid()) and not (os.getegid()): + raise AssertionError('Still root gid after drop privileges!') + + +class Signals(object): + """Convenience interface to :mod:`signals`. + + If the requested signal is not supported on the current platform, + the operation will be ignored. + + **Examples**: + + .. code-block:: python + + >>> from celery.platforms import signals + + >>> from proj.handlers import my_handler + >>> signals['INT'] = my_handler + + >>> signals['INT'] + my_handler + + >>> signals.supported('INT') + True + + >>> signals.signum('INT') + 2 + + >>> signals.ignore('USR1') + >>> signals['USR1'] == signals.ignored + True + + >>> signals.reset('USR1') + >>> signals['USR1'] == signals.default + True + + >>> from proj.handlers import exit_handler, hup_handler + >>> signals.update(INT=exit_handler, + ... TERM=exit_handler, + ... 
HUP=hup_handler) + + """ + + ignored = _signal.SIG_IGN + default = _signal.SIG_DFL + + if hasattr(_signal, 'setitimer'): + + def arm_alarm(self, seconds): + _signal.setitimer(_signal.ITIMER_REAL, seconds) + else: # pragma: no cover + try: + from itimer import alarm as _itimer_alarm # noqa + except ImportError: + + def arm_alarm(self, seconds): # noqa + _signal.alarm(math.ceil(seconds)) + else: # pragma: no cover + + def arm_alarm(self, seconds): # noqa + return _itimer_alarm(seconds) # noqa + + def reset_alarm(self): + return _signal.alarm(0) + + def supported(self, signal_name): + """Return true value if ``signal_name`` exists on this platform.""" + try: + return self.signum(signal_name) + except AttributeError: + pass + + def signum(self, signal_name): + """Get signal number from signal name.""" + if isinstance(signal_name, numbers.Integral): + return signal_name + if not isinstance(signal_name, string_t) \ + or not signal_name.isupper(): + raise TypeError('signal name must be uppercase string.') + if not signal_name.startswith('SIG'): + signal_name = 'SIG' + signal_name + return getattr(_signal, signal_name) + + def reset(self, *signal_names): + """Reset signals to the default signal handler. + + Does nothing if the platform doesn't support signals, + or the specified signal in particular. + + """ + self.update((sig, self.default) for sig in signal_names) + + def ignore(self, *signal_names): + """Ignore signal using :const:`SIG_IGN`. + + Does nothing if the platform doesn't support signals, + or the specified signal in particular. + + """ + self.update((sig, self.ignored) for sig in signal_names) + + def __getitem__(self, signal_name): + return _signal.getsignal(self.signum(signal_name)) + + def __setitem__(self, signal_name, handler): + """Install signal handler. + + Does nothing if the current platform doesn't support signals, + or the specified signal in particular. + + """ + try: + _signal.signal(self.signum(signal_name), handler) + except (AttributeError, ValueError): + pass + + def update(self, _d_=None, **sigmap): + """Set signal handlers from a mapping.""" + for signal_name, handler in items(dict(_d_ or {}, **sigmap)): + self[signal_name] = handler + +signals = Signals() +get_signal = signals.signum # compat +install_signal_handler = signals.__setitem__ # compat +reset_signal = signals.reset # compat +ignore_signal = signals.ignore # compat + + +def strargv(argv): + arg_start = 2 if 'manage' in argv[0] else 1 + if len(argv) > arg_start: + return ' '.join(argv[arg_start:]) + return '' + + +def set_process_title(progname, info=None): + """Set the ps name for the currently running process. + + Only works if :mod:`setproctitle` is installed. + + """ + proctitle = '[{0}]'.format(progname) + proctitle = '{0} {1}'.format(proctitle, info) if info else proctitle + if _setproctitle: + _setproctitle.setproctitle(safe_str(proctitle)) + return proctitle + + +if os.environ.get('NOSETPS'): # pragma: no cover + + def set_mp_process_title(*a, **k): + pass +else: + + def set_mp_process_title(progname, info=None, hostname=None): # noqa + """Set the ps name using the multiprocessing process name. + + Only works if :mod:`setproctitle` is installed. + + """ + if hostname: + progname = '{0}: {1}'.format(progname, hostname) + return set_process_title( + '{0}:{1}'.format(progname, current_process().name), info=info) + + +def get_errno_name(n): + """Get errno for string, e.g. 
``ENOENT``.""" + if isinstance(n, string_t): + return getattr(errno, n) + return n + + +@contextmanager +def ignore_errno(*errnos, **kwargs): + """Context manager to ignore specific POSIX error codes. + + Takes a list of error codes to ignore, which can be either + the name of the code, or the code integer itself:: + + >>> with ignore_errno('ENOENT'): + ... with open('foo', 'r') as fh: + ... return fh.read() + + >>> with ignore_errno(errno.ENOENT, errno.EPERM): + ... pass + + :keyword types: A tuple of exceptions to ignore (when the errno matches), + defaults to :exc:`Exception`. + """ + types = kwargs.get('types') or (Exception, ) + errnos = [get_errno_name(errno) for errno in errnos] + try: + yield + except types as exc: + if not hasattr(exc, 'errno'): + raise + if exc.errno not in errnos: + raise + + +def check_privileges(accept_content): + uid = os.getuid() if hasattr(os, 'getuid') else 65535 + gid = os.getgid() if hasattr(os, 'getgid') else 65535 + euid = os.geteuid() if hasattr(os, 'geteuid') else 65535 + egid = os.getegid() if hasattr(os, 'getegid') else 65535 + + if hasattr(os, 'fchown'): + if not all(hasattr(os, attr) + for attr in ['getuid', 'getgid', 'geteuid', 'getegid']): + raise AssertionError('suspicious platform, contact support') + + if not uid or not gid or not euid or not egid: + if ('pickle' in accept_content or + 'application/x-python-serialize' in accept_content): + if not C_FORCE_ROOT: + try: + print(ROOT_DISALLOWED.format( + uid=uid, euid=euid, gid=gid, egid=egid, + ), file=sys.stderr) + finally: + os._exit(1) + warnings.warn(RuntimeWarning(ROOT_DISCOURAGED.format( + uid=uid, euid=euid, gid=gid, egid=egid, + ))) diff --git a/thesisenv/lib/python3.6/site-packages/celery/result.py b/thesisenv/lib/python3.6/site-packages/celery/result.py new file mode 100644 index 0000000..bf49d72 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/result.py @@ -0,0 +1,925 @@ +# -*- coding: utf-8 -*- +""" + celery.result + ~~~~~~~~~~~~~ + + Task results/state and groups of results. + +""" +from __future__ import absolute_import + +import time +import warnings + +from collections import deque +from contextlib import contextmanager +from copy import copy + +from kombu.utils import cached_property +from kombu.utils.compat import OrderedDict + +from . import current_app +from . import states +from ._state import _set_task_join_will_block, task_join_will_block +from .app import app_or_default +from .datastructures import DependencyGraph, GraphFormatter +from .exceptions import IncompleteStream, TimeoutError +from .five import items, range, string_t, monotonic +from .utils import deprecated + +__all__ = ['ResultBase', 'AsyncResult', 'ResultSet', 'GroupResult', + 'EagerResult', 'result_from_tuple'] + +E_WOULDBLOCK = """\ +Never call result.get() within a task! +See http://docs.celeryq.org/en/latest/userguide/tasks.html\ +#task-synchronous-subtasks + +In Celery 3.2 this will result in an exception being +raised instead of just being a warning. +""" + + +def assert_will_not_block(): + if task_join_will_block(): + warnings.warn(RuntimeWarning(E_WOULDBLOCK)) + + +@contextmanager +def allow_join_result(): + reset_value = task_join_will_block() + _set_task_join_will_block(False) + try: + yield + finally: + _set_task_join_will_block(reset_value) + + +class ResultBase(object): + """Base class for all results""" + + #: Parent result (if part of a chain) + parent = None + + +class AsyncResult(ResultBase): + """Query task state. + + :param id: see :attr:`id`. 
+ :keyword backend: see :attr:`backend`. + + """ + app = None + + #: Error raised for timeouts. + TimeoutError = TimeoutError + + #: The task's UUID. + id = None + + #: The task result backend to use. + backend = None + + def __init__(self, id, backend=None, task_name=None, + app=None, parent=None): + self.app = app_or_default(app or self.app) + self.id = id + self.backend = backend or self.app.backend + self.task_name = task_name + self.parent = parent + self._cache = None + + def as_tuple(self): + parent = self.parent + return (self.id, parent and parent.as_tuple()), None + serializable = as_tuple # XXX compat + + def forget(self): + """Forget about (and possibly remove the result of) this task.""" + self._cache = None + self.backend.forget(self.id) + + def revoke(self, connection=None, terminate=False, signal=None, + wait=False, timeout=None): + """Send revoke signal to all workers. + + Any worker receiving the task, or having reserved the + task, *must* ignore it. + + :keyword terminate: Also terminate the process currently working + on the task (if any). + :keyword signal: Name of signal to send to process if terminate. + Default is TERM. + :keyword wait: Wait for replies from workers. Will wait for 1 second + by default or you can specify a custom ``timeout``. + :keyword timeout: Time in seconds to wait for replies if ``wait`` + enabled. + + """ + self.app.control.revoke(self.id, connection=connection, + terminate=terminate, signal=signal, + reply=wait, timeout=timeout) + + def get(self, timeout=None, propagate=True, interval=0.5, + no_ack=True, follow_parents=True, + EXCEPTION_STATES=states.EXCEPTION_STATES, + PROPAGATE_STATES=states.PROPAGATE_STATES): + """Wait until task is ready, and return its result. + + .. warning:: + + Waiting for tasks within a task may lead to deadlocks. + Please read :ref:`task-synchronous-subtasks`. + + :keyword timeout: How long to wait, in seconds, before the + operation times out. + :keyword propagate: Re-raise exception if the task failed. + :keyword interval: Time to wait (in seconds) before retrying to + retrieve the result. Note that this does not have any effect + when using the amqp result store backend, as it does not + use polling. + :keyword no_ack: Enable amqp no ack (automatically acknowledge + message). If this is :const:`False` then the message will + **not be acked**. + :keyword follow_parents: Reraise any exception raised by parent task. + + :raises celery.exceptions.TimeoutError: if `timeout` is not + :const:`None` and the result does not arrive within `timeout` + seconds. + + If the remote call raised an exception then that exception will + be re-raised. + + """ + assert_will_not_block() + on_interval = None + if follow_parents and propagate and self.parent: + on_interval = self._maybe_reraise_parent_error + on_interval() + + if self._cache: + if propagate: + self.maybe_reraise() + return self.result + + meta = self.backend.wait_for( + self.id, timeout=timeout, + interval=interval, + on_interval=on_interval, + no_ack=no_ack, + ) + if meta: + self._maybe_set_cache(meta) + status = meta['status'] + if status in PROPAGATE_STATES and propagate: + raise meta['result'] + return meta['result'] + wait = get # deprecated alias to :meth:`get`. 
+ + def _maybe_reraise_parent_error(self): + for node in reversed(list(self._parents())): + node.maybe_reraise() + + def _parents(self): + node = self.parent + while node: + yield node + node = node.parent + + def collect(self, intermediate=False, **kwargs): + """Iterator, like :meth:`get` will wait for the task to complete, + but will also follow :class:`AsyncResult` and :class:`ResultSet` + returned by the task, yielding ``(result, value)`` tuples for each + result in the tree. + + An example would be having the following tasks: + + .. code-block:: python + + from celery import group + from proj.celery import app + + @app.task(trail=True) + def A(how_many): + return group(B.s(i) for i in range(how_many))() + + @app.task(trail=True) + def B(i): + return pow2.delay(i) + + @app.task(trail=True) + def pow2(i): + return i ** 2 + + Note that the ``trail`` option must be enabled + so that the list of children is stored in ``result.children``. + This is the default but enabled explicitly for illustration. + + Calling :meth:`collect` would return: + + .. code-block:: python + + >>> from celery.result import ResultBase + >>> from proj.tasks import A + + >>> result = A.delay(10) + >>> [v for v in result.collect() + ... if not isinstance(v, (ResultBase, tuple))] + [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] + + """ + for _, R in self.iterdeps(intermediate=intermediate): + yield R, R.get(**kwargs) + + def get_leaf(self): + value = None + for _, R in self.iterdeps(): + value = R.get() + return value + + def iterdeps(self, intermediate=False): + stack = deque([(None, self)]) + + while stack: + parent, node = stack.popleft() + yield parent, node + if node.ready(): + stack.extend((node, child) for child in node.children or []) + else: + if not intermediate: + raise IncompleteStream() + + def ready(self): + """Returns :const:`True` if the task has been executed. + + If the task is still running, pending, or is waiting + for retry then :const:`False` is returned. 
+ + """ + return self.state in self.backend.READY_STATES + + def successful(self): + """Returns :const:`True` if the task executed successfully.""" + return self.state == states.SUCCESS + + def failed(self): + """Returns :const:`True` if the task failed.""" + return self.state == states.FAILURE + + def maybe_reraise(self): + if self.state in states.PROPAGATE_STATES: + raise self.result + + def build_graph(self, intermediate=False, formatter=None): + graph = DependencyGraph( + formatter=formatter or GraphFormatter(root=self.id, shape='oval'), + ) + for parent, node in self.iterdeps(intermediate=intermediate): + graph.add_arc(node) + if parent: + graph.add_edge(parent, node) + return graph + + def __str__(self): + """`str(self) -> self.id`""" + return str(self.id) + + def __hash__(self): + """`hash(self) -> hash(self.id)`""" + return hash(self.id) + + def __repr__(self): + return '<{0}: {1}>'.format(type(self).__name__, self.id) + + def __eq__(self, other): + if isinstance(other, AsyncResult): + return other.id == self.id + elif isinstance(other, string_t): + return other == self.id + return NotImplemented + + def __ne__(self, other): + return not self.__eq__(other) + + def __copy__(self): + return self.__class__( + self.id, self.backend, self.task_name, self.app, self.parent, + ) + + def __reduce__(self): + return self.__class__, self.__reduce_args__() + + def __reduce_args__(self): + return self.id, self.backend, self.task_name, None, self.parent + + def __del__(self): + self._cache = None + + @cached_property + def graph(self): + return self.build_graph() + + @property + def supports_native_join(self): + return self.backend.supports_native_join + + @property + def children(self): + return self._get_task_meta().get('children') + + def _maybe_set_cache(self, meta): + if meta: + state = meta['status'] + if state == states.SUCCESS or state in states.PROPAGATE_STATES: + return self._set_cache(meta) + return meta + + def _get_task_meta(self): + if self._cache is None: + return self._maybe_set_cache(self.backend.get_task_meta(self.id)) + return self._cache + + def _set_cache(self, d): + children = d.get('children') + if children: + d['children'] = [ + result_from_tuple(child, self.app) for child in children + ] + self._cache = d + return d + + @property + def result(self): + """When the task has been executed, this contains the return value. + If the task raised an exception, this will be the exception + instance.""" + return self._get_task_meta()['result'] + info = result + + @property + def traceback(self): + """Get the traceback of a failed task.""" + return self._get_task_meta().get('traceback') + + @property + def state(self): + """The tasks current state. + + Possible values includes: + + *PENDING* + + The task is waiting for execution. + + *STARTED* + + The task has been started. + + *RETRY* + + The task is to be retried, possibly because of failure. + + *FAILURE* + + The task raised an exception, or has exceeded the retry limit. + The :attr:`result` attribute then contains the + exception raised by the task. + + *SUCCESS* + + The task executed successfully. The :attr:`result` attribute + then contains the tasks return value. + + """ + return self._get_task_meta()['status'] + status = state + + @property + def task_id(self): + """compat alias to :attr:`id`""" + return self.id + + @task_id.setter # noqa + def task_id(self, id): + self.id = id +BaseAsyncResult = AsyncResult # for backwards compatibility. + + +class ResultSet(ResultBase): + """Working with more than one result. 
+ + :param results: List of result instances. + + """ + app = None + + #: List of results in in the set. + results = None + + def __init__(self, results, app=None, **kwargs): + self.app = app_or_default(app or self.app) + self.results = results + + def add(self, result): + """Add :class:`AsyncResult` as a new member of the set. + + Does nothing if the result is already a member. + + """ + if result not in self.results: + self.results.append(result) + + def remove(self, result): + """Remove result from the set; it must be a member. + + :raises KeyError: if the result is not a member. + + """ + if isinstance(result, string_t): + result = self.app.AsyncResult(result) + try: + self.results.remove(result) + except ValueError: + raise KeyError(result) + + def discard(self, result): + """Remove result from the set if it is a member. + + If it is not a member, do nothing. + + """ + try: + self.remove(result) + except KeyError: + pass + + def update(self, results): + """Update set with the union of itself and an iterable with + results.""" + self.results.extend(r for r in results if r not in self.results) + + def clear(self): + """Remove all results from this set.""" + self.results[:] = [] # don't create new list. + + def successful(self): + """Was all of the tasks successful? + + :returns: :const:`True` if all of the tasks finished + successfully (i.e. did not raise an exception). + + """ + return all(result.successful() for result in self.results) + + def failed(self): + """Did any of the tasks fail? + + :returns: :const:`True` if one of the tasks failed. + (i.e., raised an exception) + + """ + return any(result.failed() for result in self.results) + + def maybe_reraise(self): + for result in self.results: + result.maybe_reraise() + + def waiting(self): + """Are any of the tasks incomplete? + + :returns: :const:`True` if one of the tasks are still + waiting for execution. + + """ + return any(not result.ready() for result in self.results) + + def ready(self): + """Did all of the tasks complete? (either by success of failure). + + :returns: :const:`True` if all of the tasks has been + executed. + + """ + return all(result.ready() for result in self.results) + + def completed_count(self): + """Task completion count. + + :returns: the number of tasks completed. + + """ + return sum(int(result.successful()) for result in self.results) + + def forget(self): + """Forget about (and possible remove the result of) all the tasks.""" + for result in self.results: + result.forget() + + def revoke(self, connection=None, terminate=False, signal=None, + wait=False, timeout=None): + """Send revoke signal to all workers for all tasks in the set. + + :keyword terminate: Also terminate the process currently working + on the task (if any). + :keyword signal: Name of signal to send to process if terminate. + Default is TERM. + :keyword wait: Wait for replies from worker. Will wait for 1 second + by default or you can specify a custom ``timeout``. + :keyword timeout: Time in seconds to wait for replies if ``wait`` + enabled. 
+ + """ + self.app.control.revoke([r.id for r in self.results], + connection=connection, timeout=timeout, + terminate=terminate, signal=signal, reply=wait) + + def __iter__(self): + return iter(self.results) + + def __getitem__(self, index): + """`res[i] -> res.results[i]`""" + return self.results[index] + + @deprecated('3.2', '3.3') + def iterate(self, timeout=None, propagate=True, interval=0.5): + """Deprecated method, use :meth:`get` with a callback argument.""" + elapsed = 0.0 + results = OrderedDict((result.id, copy(result)) + for result in self.results) + + while results: + removed = set() + for task_id, result in items(results): + if result.ready(): + yield result.get(timeout=timeout and timeout - elapsed, + propagate=propagate) + removed.add(task_id) + else: + if result.backend.subpolling_interval: + time.sleep(result.backend.subpolling_interval) + for task_id in removed: + results.pop(task_id, None) + time.sleep(interval) + elapsed += interval + if timeout and elapsed >= timeout: + raise TimeoutError('The operation timed out') + + def get(self, timeout=None, propagate=True, interval=0.5, + callback=None, no_ack=True): + """See :meth:`join` + + This is here for API compatibility with :class:`AsyncResult`, + in addition it uses :meth:`join_native` if available for the + current result backend. + + """ + return (self.join_native if self.supports_native_join else self.join)( + timeout=timeout, propagate=propagate, + interval=interval, callback=callback, no_ack=no_ack) + + def join(self, timeout=None, propagate=True, interval=0.5, + callback=None, no_ack=True): + """Gathers the results of all tasks as a list in order. + + .. note:: + + This can be an expensive operation for result store + backends that must resort to polling (e.g. database). + + You should consider using :meth:`join_native` if your backend + supports it. + + .. warning:: + + Waiting for tasks within a task may lead to deadlocks. + Please see :ref:`task-synchronous-subtasks`. + + :keyword timeout: The number of seconds to wait for results before + the operation times out. + + :keyword propagate: If any of the tasks raises an exception, the + exception will be re-raised. + + :keyword interval: Time to wait (in seconds) before retrying to + retrieve a result from the set. Note that this + does not have any effect when using the amqp + result store backend, as it does not use polling. + + :keyword callback: Optional callback to be called for every result + received. Must have signature ``(task_id, value)`` + No results will be returned by this function if + a callback is specified. The order of results + is also arbitrary when a callback is used. + To get access to the result object for a particular + id you will have to generate an index first: + ``index = {r.id: r for r in gres.results.values()}`` + Or you can create new result objects on the fly: + ``result = app.AsyncResult(task_id)`` (both will + take advantage of the backend cache anyway). + + :keyword no_ack: Automatic message acknowledgement (Note that if this + is set to :const:`False` then the messages *will not be + acknowledged*). + + :raises celery.exceptions.TimeoutError: if ``timeout`` is not + :const:`None` and the operation takes longer than ``timeout`` + seconds. 
+ + """ + assert_will_not_block() + time_start = monotonic() + remaining = None + + results = [] + for result in self.results: + remaining = None + if timeout: + remaining = timeout - (monotonic() - time_start) + if remaining <= 0.0: + raise TimeoutError('join operation timed out') + value = result.get( + timeout=remaining, propagate=propagate, + interval=interval, no_ack=no_ack, + ) + if callback: + callback(result.id, value) + else: + results.append(value) + return results + + def iter_native(self, timeout=None, interval=0.5, no_ack=True): + """Backend optimized version of :meth:`iterate`. + + .. versionadded:: 2.2 + + Note that this does not support collecting the results + for different task types using different backends. + + This is currently only supported by the amqp, Redis and cache + result backends. + + """ + results = self.results + if not results: + return iter([]) + return self.backend.get_many( + set(r.id for r in results), + timeout=timeout, interval=interval, no_ack=no_ack, + ) + + def join_native(self, timeout=None, propagate=True, + interval=0.5, callback=None, no_ack=True): + """Backend optimized version of :meth:`join`. + + .. versionadded:: 2.2 + + Note that this does not support collecting the results + for different task types using different backends. + + This is currently only supported by the amqp, Redis and cache + result backends. + + """ + assert_will_not_block() + order_index = None if callback else dict( + (result.id, i) for i, result in enumerate(self.results) + ) + acc = None if callback else [None for _ in range(len(self))] + for task_id, meta in self.iter_native(timeout, interval, no_ack): + value = meta['result'] + if propagate and meta['status'] in states.PROPAGATE_STATES: + raise value + if callback: + callback(task_id, value) + else: + acc[order_index[task_id]] = value + return acc + + def _failed_join_report(self): + return (res for res in self.results + if res.backend.is_cached(res.id) and + res.state in states.PROPAGATE_STATES) + + def __len__(self): + return len(self.results) + + def __eq__(self, other): + if isinstance(other, ResultSet): + return other.results == self.results + return NotImplemented + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return '<{0}: [{1}]>'.format(type(self).__name__, + ', '.join(r.id for r in self.results)) + + @property + def subtasks(self): + """Deprecated alias to :attr:`results`.""" + return self.results + + @property + def supports_native_join(self): + try: + return self.results[0].supports_native_join + except IndexError: + pass + + @property + def backend(self): + return self.app.backend if self.app else self.results[0].backend + + +class GroupResult(ResultSet): + """Like :class:`ResultSet`, but with an associated id. + + This type is returned by :class:`~celery.group`, and the + deprecated TaskSet, meth:`~celery.task.TaskSet.apply_async` method. + + It enables inspection of the tasks state and return values as + a single entity. + + :param id: The id of the group. + :param results: List of result instances. + + """ + + #: The UUID of the group. + id = None + + #: List/iterator of results in the group + results = None + + def __init__(self, id=None, results=None, **kwargs): + self.id = id + ResultSet.__init__(self, results, **kwargs) + + def save(self, backend=None): + """Save group-result for later retrieval using :meth:`restore`. + + Example:: + + >>> def save_and_restore(result): + ... result.save() + ... 
result = GroupResult.restore(result.id) + + """ + return (backend or self.app.backend).save_group(self.id, self) + + def delete(self, backend=None): + """Remove this result if it was previously saved.""" + (backend or self.app.backend).delete_group(self.id) + + def __reduce__(self): + return self.__class__, self.__reduce_args__() + + def __reduce_args__(self): + return self.id, self.results + + def __bool__(self): + return bool(self.id or self.results) + __nonzero__ = __bool__ # Included for Py2 backwards compatibility + + def __eq__(self, other): + if isinstance(other, GroupResult): + return other.id == self.id and other.results == self.results + return NotImplemented + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return '<{0}: {1} [{2}]>'.format(type(self).__name__, self.id, + ', '.join(r.id for r in self.results)) + + def as_tuple(self): + return self.id, [r.as_tuple() for r in self.results] + serializable = as_tuple # XXX compat + + @property + def children(self): + return self.results + + @classmethod + def restore(self, id, backend=None): + """Restore previously saved group result.""" + return ( + backend or (self.app.backend if self.app else current_app.backend) + ).restore_group(id) + + +class TaskSetResult(GroupResult): + """Deprecated version of :class:`GroupResult`""" + + def __init__(self, taskset_id, results=None, **kwargs): + # XXX supports the taskset_id kwarg. + # XXX previously the "results" arg was named "subtasks". + if 'subtasks' in kwargs: + results = kwargs['subtasks'] + GroupResult.__init__(self, taskset_id, results, **kwargs) + + def itersubtasks(self): + """Deprecated. Use ``iter(self.results)`` instead.""" + return iter(self.results) + + @property + def total(self): + """Deprecated: Use ``len(r)``.""" + return len(self) + + @property + def taskset_id(self): + """compat alias to :attr:`self.id`""" + return self.id + + @taskset_id.setter # noqa + def taskset_id(self, id): + self.id = id + + +class EagerResult(AsyncResult): + """Result that we know has already been executed.""" + task_name = None + + def __init__(self, id, ret_value, state, traceback=None): + self.id = id + self._result = ret_value + self._state = state + self._traceback = traceback + + def _get_task_meta(self): + return {'task_id': self.id, 'result': self._result, 'status': + self._state, 'traceback': self._traceback} + + def __reduce__(self): + return self.__class__, self.__reduce_args__() + + def __reduce_args__(self): + return (self.id, self._result, self._state, self._traceback) + + def __copy__(self): + cls, args = self.__reduce__() + return cls(*args) + + def ready(self): + return True + + def get(self, timeout=None, propagate=True, **kwargs): + if self.successful(): + return self.result + elif self.state in states.PROPAGATE_STATES: + if propagate: + raise self.result + return self.result + wait = get + + def forget(self): + pass + + def revoke(self, *args, **kwargs): + self._state = states.REVOKED + + def __repr__(self): + return ''.format(self) + + @property + def result(self): + """The tasks return value""" + return self._result + + @property + def state(self): + """The tasks state.""" + return self._state + status = state + + @property + def traceback(self): + """The traceback if the task failed.""" + return self._traceback + + @property + def supports_native_join(self): + return False + + +def result_from_tuple(r, app=None): + # earlier backends may just pickle, so check if + # result is already prepared. 
+ app = app_or_default(app) + Result = app.AsyncResult + if not isinstance(r, ResultBase): + res, nodes = r + if nodes: + return app.GroupResult( + res, [result_from_tuple(child, app) for child in nodes], + ) + # previously did not include parent + id, parent = res if isinstance(res, (list, tuple)) else (res, None) + if parent: + parent = result_from_tuple(parent, app) + return Result(id, parent=parent) + return r +from_serializable = result_from_tuple # XXX compat diff --git a/thesisenv/lib/python3.6/site-packages/celery/schedules.py b/thesisenv/lib/python3.6/site-packages/celery/schedules.py new file mode 100644 index 0000000..6424dfa --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/schedules.py @@ -0,0 +1,593 @@ +# -*- coding: utf-8 -*- +""" + celery.schedules + ~~~~~~~~~~~~~~~~ + + Schedules define the intervals at which periodic tasks + should run. + +""" +from __future__ import absolute_import + +import numbers +import re + +from collections import namedtuple +from datetime import datetime, timedelta + +from kombu.utils import cached_property + +from . import current_app +from .five import range, string_t +from .utils import is_iterable +from .utils.timeutils import ( + timedelta_seconds, weekday, maybe_timedelta, remaining, + humanize_seconds, timezone, maybe_make_aware, ffwd +) +from .datastructures import AttributeDict + +__all__ = ['ParseException', 'schedule', 'crontab', 'crontab_parser', + 'maybe_schedule'] + +schedstate = namedtuple('schedstate', ('is_due', 'next')) + + +CRON_PATTERN_INVALID = """\ +Invalid crontab pattern. Valid range is {min}-{max}. \ +'{value}' was found.\ +""" + +CRON_INVALID_TYPE = """\ +Argument cronspec needs to be of any of the following types: \ +int, str, or an iterable type. {type!r} was given.\ +""" + +CRON_REPR = """\ +\ +""" + + +def cronfield(s): + return '*' if s is None else s + + +class ParseException(Exception): + """Raised by crontab_parser when the input can't be parsed.""" + + +class schedule(object): + """Schedule for periodic task. + + :param run_every: Interval in seconds (or a :class:`~datetime.timedelta`). + :param relative: If set to True the run time will be rounded to the + resolution of the interval. + :param nowfun: Function returning the current date and time + (class:`~datetime.datetime`). + :param app: Celery app instance. + + """ + relative = False + + def __init__(self, run_every=None, relative=False, nowfun=None, app=None): + self.run_every = maybe_timedelta(run_every) + self.relative = relative + self.nowfun = nowfun + self._app = app + + def now(self): + return (self.nowfun or self.app.now)() + + def remaining_estimate(self, last_run_at): + return remaining( + self.maybe_make_aware(last_run_at), self.run_every, + self.maybe_make_aware(self.now()), self.relative, + ) + + def is_due(self, last_run_at): + """Returns tuple of two items `(is_due, next_time_to_check)`, + where next time to check is in seconds. + + e.g. + + * `(True, 20)`, means the task should be run now, and the next + time to check is in 20 seconds. + + * `(False, 12.3)`, means the task is not due, but that the scheduler + should check again in 12.3 seconds. + + The next time to check is used to save energy/cpu cycles, + it does not need to be accurate but will influence the precision + of your schedule. You must also keep in mind + the value of :setting:`CELERYBEAT_MAX_LOOP_INTERVAL`, + which decides the maximum number of seconds the scheduler can + sleep between re-checking the periodic task intervals. 
So if you + have a task that changes schedule at runtime then your next_run_at + check will decide how long it will take before a change to the + schedule takes effect. The max loop interval takes precendence + over the next check at value returned. + + .. admonition:: Scheduler max interval variance + + The default max loop interval may vary for different schedulers. + For the default scheduler the value is 5 minutes, but for e.g. + the django-celery database scheduler the value is 5 seconds. + + """ + last_run_at = self.maybe_make_aware(last_run_at) + rem_delta = self.remaining_estimate(last_run_at) + remaining_s = timedelta_seconds(rem_delta) + if remaining_s == 0: + return schedstate(is_due=True, next=self.seconds) + return schedstate(is_due=False, next=remaining_s) + + def maybe_make_aware(self, dt): + if self.utc_enabled: + return maybe_make_aware(dt, self.tz) + return dt + + def __repr__(self): + return ''.format(self) + + def __eq__(self, other): + if isinstance(other, schedule): + return self.run_every == other.run_every + return self.run_every == other + + def __ne__(self, other): + return not self.__eq__(other) + + def __reduce__(self): + return self.__class__, (self.run_every, self.relative, self.nowfun) + + @property + def seconds(self): + return timedelta_seconds(self.run_every) + + @property + def human_seconds(self): + return humanize_seconds(self.seconds) + + @property + def app(self): + return self._app or current_app._get_current_object() + + @app.setter # noqa + def app(self, app): + self._app = app + + @cached_property + def tz(self): + return self.app.timezone + + @cached_property + def utc_enabled(self): + return self.app.conf.CELERY_ENABLE_UTC + + def to_local(self, dt): + if not self.utc_enabled: + return timezone.to_local_fallback(dt) + return dt + + +class crontab_parser(object): + """Parser for crontab expressions. Any expression of the form 'groups' + (see BNF grammar below) is accepted and expanded to a set of numbers. + These numbers represent the units of time that the crontab needs to + run on:: + + digit :: '0'..'9' + dow :: 'a'..'z' + number :: digit+ | dow+ + steps :: number + range :: number ( '-' number ) ? + numspec :: '*' | range + expr :: numspec ( '/' steps ) ? + groups :: expr ( ',' expr ) * + + The parser is a general purpose one, useful for parsing hours, minutes and + day_of_week expressions. Example usage:: + + >>> minutes = crontab_parser(60).parse('*/15') + [0, 15, 30, 45] + >>> hours = crontab_parser(24).parse('*/4') + [0, 4, 8, 12, 16, 20] + >>> day_of_week = crontab_parser(7).parse('*') + [0, 1, 2, 3, 4, 5, 6] + + It can also parse day_of_month and month_of_year expressions if initialized + with an minimum of 1. Example usage:: + + >>> days_of_month = crontab_parser(31, 1).parse('*/3') + [1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31] + >>> months_of_year = crontab_parser(12, 1).parse('*/2') + [1, 3, 5, 7, 9, 11] + >>> months_of_year = crontab_parser(12, 1).parse('2-12/2') + [2, 4, 6, 8, 10, 12] + + The maximum possible expanded value returned is found by the formula:: + + max_ + min_ - 1 + + """ + ParseException = ParseException + + _range = r'(\w+?)-(\w+)' + _steps = r'/(\w+)?' 
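Comma-separated groups from the grammar above are parsed one by one and the resulting sets unioned, so a composite minute spec expands like this:

    from celery.schedules import crontab_parser

    crontab_parser(60).parse('1,13,30-35,50-59/2')
    # -> {1, 13, 30, 31, 32, 33, 34, 35, 50, 52, 54, 56, 58}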
+ _star = r'\*' + + def __init__(self, max_=60, min_=0): + self.max_ = max_ + self.min_ = min_ + self.pats = ( + (re.compile(self._range + self._steps), self._range_steps), + (re.compile(self._range), self._expand_range), + (re.compile(self._star + self._steps), self._star_steps), + (re.compile('^' + self._star + '$'), self._expand_star), + ) + + def parse(self, spec): + acc = set() + for part in spec.split(','): + if not part: + raise self.ParseException('empty part') + acc |= set(self._parse_part(part)) + return acc + + def _parse_part(self, part): + for regex, handler in self.pats: + m = regex.match(part) + if m: + return handler(m.groups()) + return self._expand_range((part, )) + + def _expand_range(self, toks): + fr = self._expand_number(toks[0]) + if len(toks) > 1: + to = self._expand_number(toks[1]) + if to < fr: # Wrap around max_ if necessary + return (list(range(fr, self.min_ + self.max_)) + + list(range(self.min_, to + 1))) + return list(range(fr, to + 1)) + return [fr] + + def _range_steps(self, toks): + if len(toks) != 3 or not toks[2]: + raise self.ParseException('empty filter') + return self._expand_range(toks[:2])[::int(toks[2])] + + def _star_steps(self, toks): + if not toks or not toks[0]: + raise self.ParseException('empty filter') + return self._expand_star()[::int(toks[0])] + + def _expand_star(self, *args): + return list(range(self.min_, self.max_ + self.min_)) + + def _expand_number(self, s): + if isinstance(s, string_t) and s[0] == '-': + raise self.ParseException('negative numbers not supported') + try: + i = int(s) + except ValueError: + try: + i = weekday(s) + except KeyError: + raise ValueError('Invalid weekday literal {0!r}.'.format(s)) + + max_val = self.min_ + self.max_ - 1 + if i > max_val: + raise ValueError( + 'Invalid end range: {0} > {1}.'.format(i, max_val)) + if i < self.min_: + raise ValueError( + 'Invalid beginning range: {0} < {1}.'.format(i, self.min_)) + + return i + + +class crontab(schedule): + """A crontab can be used as the `run_every` value of a + :class:`PeriodicTask` to add cron-like scheduling. + + Like a :manpage:`cron` job, you can specify units of time of when + you would like the task to execute. It is a reasonably complete + implementation of cron's features, so it should provide a fair + degree of scheduling needs. + + You can specify a minute, an hour, a day of the week, a day of the + month, and/or a month in the year in any of the following formats: + + .. attribute:: minute + + - A (list of) integers from 0-59 that represent the minutes of + an hour of when execution should occur; or + - A string representing a crontab pattern. This may get pretty + advanced, like `minute='*/15'` (for every quarter) or + `minute='1,13,30-45,50-59/2'`. + + .. attribute:: hour + + - A (list of) integers from 0-23 that represent the hours of + a day of when execution should occur; or + - A string representing a crontab pattern. This may get pretty + advanced, like `hour='*/3'` (for every three hours) or + `hour='0,8-17/2'` (at midnight, and every two hours during + office hours). + + .. attribute:: day_of_week + + - A (list of) integers from 0-6, where Sunday = 0 and Saturday = + 6, that represent the days of a week that execution should + occur. + - A string representing a crontab pattern. This may get pretty + advanced, like `day_of_week='mon-fri'` (for weekdays only). + (Beware that `day_of_week='*/2'` does not literally mean + 'every two days', but 'every day that is divisible by two'!) + + .. 
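Combined, these fields give the usual beat entries; a sketch with a made-up task name, running every 30 minutes between 09:00 and 17:00 on weekdays:

    from celery.schedules import crontab

    CELERYBEAT_SCHEDULE = {
        'send-office-report': {
            'task': 'reports.send',        # hypothetical task name
            'schedule': crontab(minute='*/30', hour='9-17',
                                day_of_week='mon-fri'),
        },
    }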
attribute:: day_of_month + + - A (list of) integers from 1-31 that represents the days of the + month that execution should occur. + - A string representing a crontab pattern. This may get pretty + advanced, such as `day_of_month='2-30/3'` (for every even + numbered day) or `day_of_month='1-7,15-21'` (for the first and + third weeks of the month). + + .. attribute:: month_of_year + + - A (list of) integers from 1-12 that represents the months of + the year during which execution can occur. + - A string representing a crontab pattern. This may get pretty + advanced, such as `month_of_year='*/3'` (for the first month + of every quarter) or `month_of_year='2-12/2'` (for every even + numbered month). + + .. attribute:: nowfun + + Function returning the current date and time + (:class:`~datetime.datetime`). + + .. attribute:: app + + The Celery app instance. + + It is important to realize that any day on which execution should + occur must be represented by entries in all three of the day and + month attributes. For example, if `day_of_week` is 0 and `day_of_month` + is every seventh day, only months that begin on Sunday and are also + in the `month_of_year` attribute will have execution events. Or, + `day_of_week` is 1 and `day_of_month` is '1-7,15-21' means every + first and third monday of every month present in `month_of_year`. + + """ + + def __init__(self, minute='*', hour='*', day_of_week='*', + day_of_month='*', month_of_year='*', nowfun=None, app=None): + self._orig_minute = cronfield(minute) + self._orig_hour = cronfield(hour) + self._orig_day_of_week = cronfield(day_of_week) + self._orig_day_of_month = cronfield(day_of_month) + self._orig_month_of_year = cronfield(month_of_year) + self.hour = self._expand_cronspec(hour, 24) + self.minute = self._expand_cronspec(minute, 60) + self.day_of_week = self._expand_cronspec(day_of_week, 7) + self.day_of_month = self._expand_cronspec(day_of_month, 31, 1) + self.month_of_year = self._expand_cronspec(month_of_year, 12, 1) + self.nowfun = nowfun + self._app = app + + @staticmethod + def _expand_cronspec(cronspec, max_, min_=0): + """Takes the given cronspec argument in one of the forms:: + + int (like 7) + str (like '3-5,*/15', '*', or 'monday') + set (like set([0,15,30,45])) + list (like [8-17]) + + And convert it to an (expanded) set representing all time unit + values on which the crontab triggers. Only in case of the base + type being 'str', parsing occurs. (It is fast and + happens only once for each crontab instance, so there is no + significant performance overhead involved.) + + For the other base types, merely Python type conversions happen. + + The argument `max_` is needed to determine the expansion of '*' + and ranges. + The argument `min_` is needed to determine the expansion of '*' + and ranges for 1-based cronspecs, such as day of month or month + of year. The default is sufficient for minute, hour, and day of + week. 
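Restricting both ``day_of_month`` and ``month_of_year`` narrows execution down to a handful of days per year, for example:

    from celery.schedules import crontab

    # midnight on the first day of every quarter (January, April, July, October)
    quarterly = crontab(minute=0, hour=0, day_of_month='1', month_of_year='*/3')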
+ + """ + if isinstance(cronspec, numbers.Integral): + result = set([cronspec]) + elif isinstance(cronspec, string_t): + result = crontab_parser(max_, min_).parse(cronspec) + elif isinstance(cronspec, set): + result = cronspec + elif is_iterable(cronspec): + result = set(cronspec) + else: + raise TypeError(CRON_INVALID_TYPE.format(type=type(cronspec))) + + # assure the result does not preceed the min or exceed the max + for number in result: + if number >= max_ + min_ or number < min_: + raise ValueError(CRON_PATTERN_INVALID.format( + min=min_, max=max_ - 1 + min_, value=number)) + return result + + def _delta_to_next(self, last_run_at, next_hour, next_minute): + """ + Takes a datetime of last run, next minute and hour, and + returns a relativedelta for the next scheduled day and time. + Only called when day_of_month and/or month_of_year cronspec + is specified to further limit scheduled task execution. + """ + from bisect import bisect, bisect_left + + datedata = AttributeDict(year=last_run_at.year) + days_of_month = sorted(self.day_of_month) + months_of_year = sorted(self.month_of_year) + + def day_out_of_range(year, month, day): + try: + datetime(year=year, month=month, day=day) + except ValueError: + return True + return False + + def roll_over(): + while 1: + flag = (datedata.dom == len(days_of_month) or + day_out_of_range(datedata.year, + months_of_year[datedata.moy], + days_of_month[datedata.dom]) or + (self.maybe_make_aware(datetime(datedata.year, + months_of_year[datedata.moy], + days_of_month[datedata.dom])) < last_run_at)) + + if flag: + datedata.dom = 0 + datedata.moy += 1 + if datedata.moy == len(months_of_year): + datedata.moy = 0 + datedata.year += 1 + else: + break + + if last_run_at.month in self.month_of_year: + datedata.dom = bisect(days_of_month, last_run_at.day) + datedata.moy = bisect_left(months_of_year, last_run_at.month) + else: + datedata.dom = 0 + datedata.moy = bisect(months_of_year, last_run_at.month) + if datedata.moy == len(months_of_year): + datedata.moy = 0 + roll_over() + + while 1: + th = datetime(year=datedata.year, + month=months_of_year[datedata.moy], + day=days_of_month[datedata.dom]) + if th.isoweekday() % 7 in self.day_of_week: + break + datedata.dom += 1 + roll_over() + + return ffwd(year=datedata.year, + month=months_of_year[datedata.moy], + day=days_of_month[datedata.dom], + hour=next_hour, + minute=next_minute, + second=0, + microsecond=0) + + def now(self): + return (self.nowfun or self.app.now)() + + def __repr__(self): + return CRON_REPR.format(self) + + def __reduce__(self): + return (self.__class__, (self._orig_minute, + self._orig_hour, + self._orig_day_of_week, + self._orig_day_of_month, + self._orig_month_of_year), None) + + def remaining_delta(self, last_run_at, tz=None, ffwd=ffwd): + tz = tz or self.tz + last_run_at = self.maybe_make_aware(last_run_at) + now = self.maybe_make_aware(self.now()) + dow_num = last_run_at.isoweekday() % 7 # Sunday is day 0, not day 7 + + execute_this_date = (last_run_at.month in self.month_of_year and + last_run_at.day in self.day_of_month and + dow_num in self.day_of_week) + + execute_this_hour = (execute_this_date and + last_run_at.day == now.day and + last_run_at.month == now.month and + last_run_at.year == now.year and + last_run_at.hour in self.hour and + last_run_at.minute < max(self.minute)) + + if execute_this_hour: + next_minute = min(minute for minute in self.minute + if minute > last_run_at.minute) + delta = ffwd(minute=next_minute, second=0, microsecond=0) + else: + next_minute = 
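``_expand_cronspec`` is an internal static helper, but calling it directly illustrates how each accepted form normalizes to an expanded set:

    from celery.schedules import crontab

    crontab._expand_cronspec('*/15', 60)       # -> {0, 15, 30, 45}
    crontab._expand_cronspec(7, 24)            # -> {7}
    crontab._expand_cronspec([1, 15], 31, 1)   # -> {1, 15}
    crontab._expand_cronspec('32', 24)         # raises ValueError (out of range)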
min(self.minute) + execute_today = (execute_this_date and + last_run_at.hour < max(self.hour)) + + if execute_today: + next_hour = min(hour for hour in self.hour + if hour > last_run_at.hour) + delta = ffwd(hour=next_hour, minute=next_minute, + second=0, microsecond=0) + else: + next_hour = min(self.hour) + all_dom_moy = (self._orig_day_of_month == '*' and + self._orig_month_of_year == '*') + if all_dom_moy: + next_day = min([day for day in self.day_of_week + if day > dow_num] or self.day_of_week) + add_week = next_day == dow_num + + delta = ffwd(weeks=add_week and 1 or 0, + weekday=(next_day - 1) % 7, + hour=next_hour, + minute=next_minute, + second=0, + microsecond=0) + else: + delta = self._delta_to_next(last_run_at, + next_hour, next_minute) + return self.to_local(last_run_at), delta, self.to_local(now) + + def remaining_estimate(self, last_run_at, ffwd=ffwd): + """Returns when the periodic task should run next as a timedelta.""" + return remaining(*self.remaining_delta(last_run_at, ffwd=ffwd)) + + def is_due(self, last_run_at): + """Returns tuple of two items `(is_due, next_time_to_run)`, + where next time to run is in seconds. + + See :meth:`celery.schedules.schedule.is_due` for more information. + + """ + rem_delta = self.remaining_estimate(last_run_at) + rem = timedelta_seconds(rem_delta) + due = rem == 0 + if due: + rem_delta = self.remaining_estimate(self.now()) + rem = timedelta_seconds(rem_delta) + return schedstate(due, rem) + + def __eq__(self, other): + if isinstance(other, crontab): + return (other.month_of_year == self.month_of_year and + other.day_of_month == self.day_of_month and + other.day_of_week == self.day_of_week and + other.hour == self.hour and + other.minute == self.minute) + return NotImplemented + + def __ne__(self, other): + return not self.__eq__(other) + + +def maybe_schedule(s, relative=False, app=None): + if s is not None: + if isinstance(s, numbers.Integral): + s = timedelta(seconds=s) + if isinstance(s, timedelta): + return schedule(s, relative, app=app) + else: + s.app = app + return s diff --git a/thesisenv/lib/python3.6/site-packages/celery/security/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/security/__init__.py new file mode 100644 index 0000000..352d400 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/security/__init__.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +""" + celery.security + ~~~~~~~~~~~~~~~ + + Module implementing the signing message serializer. + +""" +from __future__ import absolute_import + +from kombu.serialization import ( + registry, disable_insecure_serializers as _disable_insecure_serializers, +) + +from celery.exceptions import ImproperlyConfigured + +from .serialization import register_auth + +SSL_NOT_INSTALLED = """\ +You need to install the pyOpenSSL library to use the auth serializer. +Please install by: + + $ pip install pyOpenSSL +""" + +SETTING_MISSING = """\ +Sorry, but you have to configure the + * CELERY_SECURITY_KEY + * CELERY_SECURITY_CERTIFICATE, and the + * CELERY_SECURITY_CERT_STORE +configuration settings to use the auth serializer. + +Please see the configuration reference for more information. 
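``maybe_schedule`` is what lets beat entries accept plain numbers, timedeltas or ready-made schedule objects interchangeably:

    from datetime import timedelta
    from celery.schedules import crontab, maybe_schedule

    maybe_schedule(30)                       # -> schedule, every 30 seconds
    maybe_schedule(timedelta(minutes=5))     # -> schedule, every 5 minutes
    maybe_schedule(crontab(minute='*/10'))   # already a schedule: returned as-is
    maybe_schedule(None)                     # -> None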
+""" + +__all__ = ['setup_security'] + + +def setup_security(allowed_serializers=None, key=None, cert=None, store=None, + digest='sha1', serializer='json', app=None): + """See :meth:`@Celery.setup_security`.""" + if app is None: + from celery import current_app + app = current_app._get_current_object() + + _disable_insecure_serializers(allowed_serializers) + + conf = app.conf + if conf.CELERY_TASK_SERIALIZER != 'auth': + return + + try: + from OpenSSL import crypto # noqa + except ImportError: + raise ImproperlyConfigured(SSL_NOT_INSTALLED) + + key = key or conf.CELERY_SECURITY_KEY + cert = cert or conf.CELERY_SECURITY_CERTIFICATE + store = store or conf.CELERY_SECURITY_CERT_STORE + + if not (key and cert and store): + raise ImproperlyConfigured(SETTING_MISSING) + + with open(key) as kf: + with open(cert) as cf: + register_auth(kf.read(), cf.read(), store, digest, serializer) + registry._set_default_serializer('auth') + + +def disable_untrusted_serializers(whitelist=None): + _disable_insecure_serializers(allowed=whitelist) diff --git a/thesisenv/lib/python3.6/site-packages/celery/security/certificate.py b/thesisenv/lib/python3.6/site-packages/celery/security/certificate.py new file mode 100644 index 0000000..c1c520c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/security/certificate.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- +""" + celery.security.certificate + ~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + X.509 certificates. + +""" +from __future__ import absolute_import + +import glob +import os + +from kombu.utils.encoding import bytes_to_str + +from celery.exceptions import SecurityError +from celery.five import values + +from .utils import crypto, reraise_errors + +__all__ = ['Certificate', 'CertStore', 'FSCertStore'] + + +class Certificate(object): + """X.509 certificate.""" + + def __init__(self, cert): + assert crypto is not None + with reraise_errors('Invalid certificate: {0!r}'): + self._cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert) + + def has_expired(self): + """Check if the certificate has expired.""" + return self._cert.has_expired() + + def get_serial_number(self): + """Return the serial number in the certificate.""" + return bytes_to_str(self._cert.get_serial_number()) + + def get_issuer(self): + """Return issuer (CA) as a string""" + return ' '.join(bytes_to_str(x[1]) for x in + self._cert.get_issuer().get_components()) + + def get_id(self): + """Serial number/issuer pair uniquely identifies a certificate""" + return '{0} {1}'.format(self.get_issuer(), self.get_serial_number()) + + def verify(self, data, signature, digest): + """Verifies the signature for string containing data.""" + with reraise_errors('Bad signature: {0!r}'): + crypto.verify(self._cert, signature, data, digest) + + +class CertStore(object): + """Base class for certificate stores""" + + def __init__(self): + self._certs = {} + + def itercerts(self): + """an iterator over the certificates""" + for c in values(self._certs): + yield c + + def __getitem__(self, id): + """get certificate by id""" + try: + return self._certs[bytes_to_str(id)] + except KeyError: + raise SecurityError('Unknown certificate: {0!r}'.format(id)) + + def add_cert(self, cert): + cert_id = bytes_to_str(cert.get_id()) + if cert_id in self._certs: + raise SecurityError('Duplicate certificate: {0!r}'.format(id)) + self._certs[cert_id] = cert + + +class FSCertStore(CertStore): + """File system certificate store""" + + def __init__(self, path): + CertStore.__init__(self) + if os.path.isdir(path): + path = os.path.join(path, 
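From the application side the wiring looks roughly like this (the broker URL and file paths are placeholders, and pyOpenSSL must be installed):

    from celery import Celery

    app = Celery('proj', broker='amqp://')
    app.conf.update(
        CELERY_TASK_SERIALIZER='auth',
        CELERY_SECURITY_KEY='/etc/ssl/private/worker.key',
        CELERY_SECURITY_CERTIFICATE='/etc/ssl/certs/worker.pem',
        CELERY_SECURITY_CERT_STORE='/etc/ssl/certs/*.pem',
    )
    # raises ImproperlyConfigured if any of the settings above is missing
    app.setup_security()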
'*') + for p in glob.glob(path): + with open(p) as f: + cert = Certificate(f.read()) + if cert.has_expired(): + raise SecurityError( + 'Expired certificate: {0!r}'.format(cert.get_id())) + self.add_cert(cert) diff --git a/thesisenv/lib/python3.6/site-packages/celery/security/key.py b/thesisenv/lib/python3.6/site-packages/celery/security/key.py new file mode 100644 index 0000000..a5c2620 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/security/key.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +""" + celery.security.key + ~~~~~~~~~~~~~~~~~~~ + + Private key for the security serializer. + +""" +from __future__ import absolute_import + +from kombu.utils.encoding import ensure_bytes + +from .utils import crypto, reraise_errors + +__all__ = ['PrivateKey'] + + +class PrivateKey(object): + + def __init__(self, key): + with reraise_errors('Invalid private key: {0!r}'): + self._key = crypto.load_privatekey(crypto.FILETYPE_PEM, key) + + def sign(self, data, digest): + """sign string containing data.""" + with reraise_errors('Unable to sign data: {0!r}'): + return crypto.sign(self._key, ensure_bytes(data), digest) diff --git a/thesisenv/lib/python3.6/site-packages/celery/security/serialization.py b/thesisenv/lib/python3.6/site-packages/celery/security/serialization.py new file mode 100644 index 0000000..7548358 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/security/serialization.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- +""" + celery.security.serialization + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Secure serializer. + +""" +from __future__ import absolute_import + +import base64 + +from kombu.serialization import registry, dumps, loads +from kombu.utils.encoding import bytes_to_str, str_to_bytes, ensure_bytes + +from .certificate import Certificate, FSCertStore +from .key import PrivateKey +from .utils import reraise_errors + +__all__ = ['SecureSerializer', 'register_auth'] + + +def b64encode(s): + return bytes_to_str(base64.b64encode(str_to_bytes(s))) + + +def b64decode(s): + return base64.b64decode(str_to_bytes(s)) + + +class SecureSerializer(object): + + def __init__(self, key=None, cert=None, cert_store=None, + digest='sha1', serializer='json'): + self._key = key + self._cert = cert + self._cert_store = cert_store + self._digest = digest + self._serializer = serializer + + def serialize(self, data): + """serialize data structure into string""" + assert self._key is not None + assert self._cert is not None + with reraise_errors('Unable to serialize: {0!r}', (Exception, )): + content_type, content_encoding, body = dumps( + bytes_to_str(data), serializer=self._serializer) + # What we sign is the serialized body, not the body itself. + # this way the receiver doesn't have to decode the contents + # to verify the signature (and thus avoiding potential flaws + # in the decoding step). 
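``Certificate`` and ``PrivateKey`` shown above provide the sign/verify primitives the secure serializer builds on; a rough sketch with placeholder paths:

    from celery.security.certificate import Certificate
    from celery.security.key import PrivateKey

    # both classes require pyOpenSSL; the key must belong to the certificate
    with open('/etc/ssl/private/worker.key') as kf, \
            open('/etc/ssl/certs/worker.pem') as cf:
        key, cert = PrivateKey(kf.read()), Certificate(cf.read())

    signature = key.sign(b'payload', 'sha1')
    cert.verify(b'payload', signature, 'sha1')   # raises SecurityError if tampered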
+ body = ensure_bytes(body) + return self._pack(body, content_type, content_encoding, + signature=self._key.sign(body, self._digest), + signer=self._cert.get_id()) + + def deserialize(self, data): + """deserialize data structure from string""" + assert self._cert_store is not None + with reraise_errors('Unable to deserialize: {0!r}', (Exception, )): + payload = self._unpack(data) + signature, signer, body = (payload['signature'], + payload['signer'], + payload['body']) + self._cert_store[signer].verify(body, signature, self._digest) + return loads(bytes_to_str(body), payload['content_type'], + payload['content_encoding'], force=True) + + def _pack(self, body, content_type, content_encoding, signer, signature, + sep=str_to_bytes('\x00\x01')): + fields = sep.join( + ensure_bytes(s) for s in [signer, signature, content_type, + content_encoding, body] + ) + return b64encode(fields) + + def _unpack(self, payload, sep=str_to_bytes('\x00\x01')): + raw_payload = b64decode(ensure_bytes(payload)) + first_sep = raw_payload.find(sep) + + signer = raw_payload[:first_sep] + signer_cert = self._cert_store[signer] + + sig_len = signer_cert._cert.get_pubkey().bits() >> 3 + signature = raw_payload[ + first_sep + len(sep):first_sep + len(sep) + sig_len + ] + end_of_sig = first_sep + len(sep) + sig_len + len(sep) + + v = raw_payload[end_of_sig:].split(sep) + + return { + 'signer': signer, + 'signature': signature, + 'content_type': bytes_to_str(v[0]), + 'content_encoding': bytes_to_str(v[1]), + 'body': bytes_to_str(v[2]), + } + + +def register_auth(key=None, cert=None, store=None, digest='sha1', + serializer='json'): + """register security serializer""" + s = SecureSerializer(key and PrivateKey(key), + cert and Certificate(cert), + store and FSCertStore(store), + digest=digest, serializer=serializer) + registry.register('auth', s.serialize, s.deserialize, + content_type='application/data', + content_encoding='utf-8') diff --git a/thesisenv/lib/python3.6/site-packages/celery/security/utils.py b/thesisenv/lib/python3.6/site-packages/celery/security/utils.py new file mode 100644 index 0000000..d184d0b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/security/utils.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +""" + celery.security.utils + ~~~~~~~~~~~~~~~~~~~~~ + + Utilities used by the message signing serializer. + +""" +from __future__ import absolute_import + +import sys + +from contextlib import contextmanager + +from celery.exceptions import SecurityError +from celery.five import reraise + +try: + from OpenSSL import crypto +except ImportError: # pragma: no cover + crypto = None # noqa + +__all__ = ['reraise_errors'] + + +@contextmanager +def reraise_errors(msg='{0!r}', errors=None): + assert crypto is not None + errors = (crypto.Error, ) if errors is None else errors + try: + yield + except errors as exc: + reraise(SecurityError, + SecurityError(msg.format(exc)), + sys.exc_info()[2]) diff --git a/thesisenv/lib/python3.6/site-packages/celery/signals.py b/thesisenv/lib/python3.6/site-packages/celery/signals.py new file mode 100644 index 0000000..2091830 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/signals.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- +""" + celery.signals + ~~~~~~~~~~~~~~ + + This module defines the signals (Observer pattern) sent by + both workers and clients. + + Functions can be connected to these signals, and connected + functions are called whenever a signal is called. + + See :ref:`signals` for more information. 
+ +""" +from __future__ import absolute_import +from .utils.dispatch import Signal + +__all__ = ['before_task_publish', 'after_task_publish', + 'task_prerun', 'task_postrun', 'task_success', + 'task_retry', 'task_failure', 'task_revoked', 'celeryd_init', + 'celeryd_after_setup', 'worker_init', 'worker_process_init', + 'worker_ready', 'worker_shutdown', 'setup_logging', + 'after_setup_logger', 'after_setup_task_logger', + 'beat_init', 'beat_embedded_init', 'eventlet_pool_started', + 'eventlet_pool_preshutdown', 'eventlet_pool_postshutdown', + 'eventlet_pool_apply'] + +before_task_publish = Signal(providing_args=[ + 'body', 'exchange', 'routing_key', 'headers', 'properties', + 'declare', 'retry_policy', +]) +after_task_publish = Signal(providing_args=[ + 'body', 'exchange', 'routing_key', +]) +#: Deprecated, use after_task_publish instead. +task_sent = Signal(providing_args=[ + 'task_id', 'task', 'args', 'kwargs', 'eta', 'taskset', +]) +task_prerun = Signal(providing_args=['task_id', 'task', 'args', 'kwargs']) +task_postrun = Signal(providing_args=[ + 'task_id', 'task', 'args', 'kwargs', 'retval', +]) +task_success = Signal(providing_args=['result']) +task_retry = Signal(providing_args=[ + 'request', 'reason', 'einfo', +]) +task_failure = Signal(providing_args=[ + 'task_id', 'exception', 'args', 'kwargs', 'traceback', 'einfo', +]) +task_revoked = Signal(providing_args=[ + 'request', 'terminated', 'signum', 'expired', +]) +celeryd_init = Signal(providing_args=['instance', 'conf', 'options']) +celeryd_after_setup = Signal(providing_args=['instance', 'conf']) +import_modules = Signal(providing_args=[]) +worker_init = Signal(providing_args=[]) +worker_process_init = Signal(providing_args=[]) +worker_process_shutdown = Signal(providing_args=[]) +worker_ready = Signal(providing_args=[]) +worker_shutdown = Signal(providing_args=[]) +setup_logging = Signal(providing_args=[ + 'loglevel', 'logfile', 'format', 'colorize', +]) +after_setup_logger = Signal(providing_args=[ + 'logger', 'loglevel', 'logfile', 'format', 'colorize', +]) +after_setup_task_logger = Signal(providing_args=[ + 'logger', 'loglevel', 'logfile', 'format', 'colorize', +]) +beat_init = Signal(providing_args=[]) +beat_embedded_init = Signal(providing_args=[]) +eventlet_pool_started = Signal(providing_args=[]) +eventlet_pool_preshutdown = Signal(providing_args=[]) +eventlet_pool_postshutdown = Signal(providing_args=[]) +eventlet_pool_apply = Signal(providing_args=['target', 'args', 'kwargs']) +user_preload_options = Signal(providing_args=['app', 'options']) diff --git a/thesisenv/lib/python3.6/site-packages/celery/states.py b/thesisenv/lib/python3.6/site-packages/celery/states.py new file mode 100644 index 0000000..665a57b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/states.py @@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*- +""" +celery.states +============= + +Built-in task states. + +.. _states: + +States +------ + +See :ref:`task-states`. + +.. _statesets: + +Sets +---- + +.. state:: READY_STATES + +READY_STATES +~~~~~~~~~~~~ + +Set of states meaning the task result is ready (has been executed). + +.. state:: UNREADY_STATES + +UNREADY_STATES +~~~~~~~~~~~~~~ + +Set of states meaning the task result is not ready (has not been executed). + +.. state:: EXCEPTION_STATES + +EXCEPTION_STATES +~~~~~~~~~~~~~~~~ + +Set of states meaning the task returned an exception. + +.. state:: PROPAGATE_STATES + +PROPAGATE_STATES +~~~~~~~~~~~~~~~~ + +Set of exception states that should propagate exceptions to the user. + +.. 
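Connecting a handler is done by decorating a function with a signal's ``connect`` method; for instance, for ``task_postrun``:

    from celery.signals import task_postrun

    @task_postrun.connect
    def log_result(sender=None, task_id=None, retval=None, **kwargs):
        # runs in the worker after every task finishes
        print('task %s returned %r' % (task_id, retval))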
state:: ALL_STATES + +ALL_STATES +~~~~~~~~~~ + +Set of all possible states. + + +Misc. +----- + +""" +from __future__ import absolute_import + +__all__ = ['PENDING', 'RECEIVED', 'STARTED', 'SUCCESS', 'FAILURE', + 'REVOKED', 'RETRY', 'IGNORED', 'READY_STATES', 'UNREADY_STATES', + 'EXCEPTION_STATES', 'PROPAGATE_STATES', 'precedence', 'state'] + +#: State precedence. +#: None represents the precedence of an unknown state. +#: Lower index means higher precedence. +PRECEDENCE = ['SUCCESS', + 'FAILURE', + None, + 'REVOKED', + 'STARTED', + 'RECEIVED', + 'RETRY', + 'PENDING'] + +#: Hash lookup of PRECEDENCE to index +PRECEDENCE_LOOKUP = dict(zip(PRECEDENCE, range(0, len(PRECEDENCE)))) +NONE_PRECEDENCE = PRECEDENCE_LOOKUP[None] + + +def precedence(state): + """Get the precedence index for state. + + Lower index means higher precedence. + + """ + try: + return PRECEDENCE_LOOKUP[state] + except KeyError: + return NONE_PRECEDENCE + + +class state(str): + """State is a subclass of :class:`str`, implementing comparison + methods adhering to state precedence rules:: + + >>> from celery.states import state, PENDING, SUCCESS + + >>> state(PENDING) < state(SUCCESS) + True + + Any custom state is considered to be lower than :state:`FAILURE` and + :state:`SUCCESS`, but higher than any of the other built-in states:: + + >>> state('PROGRESS') > state(STARTED) + True + + >>> state('PROGRESS') > state('SUCCESS') + False + + """ + + def compare(self, other, fun): + return fun(precedence(self), precedence(other)) + + def __gt__(self, other): + return precedence(self) < precedence(other) + + def __ge__(self, other): + return precedence(self) <= precedence(other) + + def __lt__(self, other): + return precedence(self) > precedence(other) + + def __le__(self, other): + return precedence(self) >= precedence(other) + +#: Task state is unknown (assumed pending since you know the id). +PENDING = 'PENDING' +#: Task was received by a worker. +RECEIVED = 'RECEIVED' +#: Task was started by a worker (:setting:`CELERY_TRACK_STARTED`). +STARTED = 'STARTED' +#: Task succeeded +SUCCESS = 'SUCCESS' +#: Task failed +FAILURE = 'FAILURE' +#: Task was revoked. +REVOKED = 'REVOKED' +#: Task is waiting for retry. +RETRY = 'RETRY' +IGNORED = 'IGNORED' +REJECTED = 'REJECTED' + +READY_STATES = frozenset([SUCCESS, FAILURE, REVOKED]) +UNREADY_STATES = frozenset([PENDING, RECEIVED, STARTED, RETRY]) +EXCEPTION_STATES = frozenset([RETRY, FAILURE, REVOKED]) +PROPAGATE_STATES = frozenset([FAILURE, REVOKED]) + +ALL_STATES = frozenset([PENDING, RECEIVED, STARTED, + SUCCESS, FAILURE, RETRY, REVOKED]) diff --git a/thesisenv/lib/python3.6/site-packages/celery/task/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/task/__init__.py new file mode 100644 index 0000000..4ab1a2f --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/task/__init__.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +""" + celery.task + ~~~~~~~~~~~ + + This is the old task module, it should not be used anymore, + import from the main 'celery' module instead. + If you're looking for the decorator implementation then that's in + ``celery.app.base.Celery.task``. 
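In practice the precedence table above means that completed states compare as "greater" than pending ones, e.g.:

    from celery import states

    states.state(states.SUCCESS) > states.state(states.PENDING)   # True
    states.precedence(states.SUCCESS)                              # 0, highest
    states.SUCCESS in states.READY_STATES                          # True
    states.PENDING in states.UNREADY_STATES                        # True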
+ +""" +from __future__ import absolute_import + +from celery._state import current_app, current_task as current +from celery.five import LazyModule, recreate_module +from celery.local import Proxy + +__all__ = [ + 'BaseTask', 'Task', 'PeriodicTask', 'task', 'periodic_task', + 'group', 'chord', 'subtask', 'TaskSet', +] + + +STATICA_HACK = True +globals()['kcah_acitats'[::-1].upper()] = False +if STATICA_HACK: # pragma: no cover + # This is never executed, but tricks static analyzers (PyDev, PyCharm, + # pylint, etc.) into knowing the types of these symbols, and what + # they contain. + from celery.canvas import group, chord, subtask + from .base import BaseTask, Task, PeriodicTask, task, periodic_task + from .sets import TaskSet + + +class module(LazyModule): + + def __call__(self, *args, **kwargs): + return self.task(*args, **kwargs) + + +old_module, new_module = recreate_module( # pragma: no cover + __name__, + by_module={ + 'celery.task.base': ['BaseTask', 'Task', 'PeriodicTask', + 'task', 'periodic_task'], + 'celery.canvas': ['group', 'chord', 'subtask'], + 'celery.task.sets': ['TaskSet'], + }, + base=module, + __package__='celery.task', + __file__=__file__, + __path__=__path__, + __doc__=__doc__, + current=current, + discard_all=Proxy(lambda: current_app.control.purge), + backend_cleanup=Proxy( + lambda: current_app.tasks['celery.backend_cleanup'] + ), +) diff --git a/thesisenv/lib/python3.6/site-packages/celery/task/base.py b/thesisenv/lib/python3.6/site-packages/celery/task/base.py new file mode 100644 index 0000000..aeb9f82 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/task/base.py @@ -0,0 +1,179 @@ +# -*- coding: utf-8 -*- +""" + celery.task.base + ~~~~~~~~~~~~~~~~ + + The task implementation has been moved to :mod:`celery.app.task`. + + This contains the backward compatible Task class used in the old API, + and shouldn't be used in new applications. + +""" +from __future__ import absolute_import + +from kombu import Exchange + +from celery import current_app +from celery.app.task import Context, TaskType, Task as BaseTask # noqa +from celery.five import class_property, reclassmethod +from celery.schedules import maybe_schedule +from celery.utils.log import get_task_logger + +__all__ = ['Task', 'PeriodicTask', 'task'] + +#: list of methods that must be classmethods in the old API. +_COMPAT_CLASSMETHODS = ( + 'delay', 'apply_async', 'retry', 'apply', 'subtask_from_request', + 'AsyncResult', 'subtask', '_get_request', '_get_exec_options', +) + + +class Task(BaseTask): + """Deprecated Task base class. + + Modern applications should use :class:`celery.Task` instead. + + """ + abstract = True + __bound__ = False + __v2_compat__ = True + + # - Deprecated compat. attributes -: + + queue = None + routing_key = None + exchange = None + exchange_type = None + delivery_mode = None + mandatory = False # XXX deprecated + immediate = False # XXX deprecated + priority = None + type = 'regular' + disable_error_emails = False + accept_magic_kwargs = False + + from_config = BaseTask.from_config + ( + ('exchange_type', 'CELERY_DEFAULT_EXCHANGE_TYPE'), + ('delivery_mode', 'CELERY_DEFAULT_DELIVERY_MODE'), + ) + + # In old Celery the @task decorator didn't exist, so one would create + # classes instead and use them directly (e.g. MyTask.apply_async()). + # the use of classmethods was a hack so that it was not necessary + # to instantiate the class before using it, but it has only + # given us pain (like all magic). 
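Under the old API one subclassed ``Task`` directly and relied on the classmethod shims mentioned above, roughly like this (new code should use the ``@app.task`` decorator instead):

    from celery.task import Task

    class AddTask(Task):            # old-style, kept only for compatibility
        def run(self, x, y):
            return x + y

    # no instantiation needed thanks to the compat classmethods
    result = AddTask.delay(2, 2)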
+ for name in _COMPAT_CLASSMETHODS: + locals()[name] = reclassmethod(getattr(BaseTask, name)) + + @class_property + def request(cls): + return cls._get_request() + + @class_property + def backend(cls): + if cls._backend is None: + return cls.app.backend + return cls._backend + + @backend.setter + def backend(cls, value): # noqa + cls._backend = value + + @classmethod + def get_logger(self, **kwargs): + return get_task_logger(self.name) + + @classmethod + def establish_connection(self): + """Deprecated method used to get a broker connection. + + Should be replaced with :meth:`@Celery.connection` + instead, or by acquiring connections from the connection pool: + + .. code-block:: python + + # using the connection pool + with celery.pool.acquire(block=True) as conn: + ... + + # establish fresh connection + with celery.connection() as conn: + ... + """ + return self._get_app().connection() + + def get_publisher(self, connection=None, exchange=None, + exchange_type=None, **options): + """Deprecated method to get the task publisher (now called producer). + + Should be replaced with :class:`@amqp.TaskProducer`: + + .. code-block:: python + + with celery.connection() as conn: + with celery.amqp.TaskProducer(conn) as prod: + my_task.apply_async(producer=prod) + + """ + exchange = self.exchange if exchange is None else exchange + if exchange_type is None: + exchange_type = self.exchange_type + connection = connection or self.establish_connection() + return self._get_app().amqp.TaskProducer( + connection, + exchange=exchange and Exchange(exchange, exchange_type), + routing_key=self.routing_key, **options + ) + + @classmethod + def get_consumer(self, connection=None, queues=None, **kwargs): + """Deprecated method used to get consumer for the queue + this task is sent to. + + Should be replaced with :class:`@amqp.TaskConsumer` instead: + + """ + Q = self._get_app().amqp + connection = connection or self.establish_connection() + if queues is None: + queues = Q.queues[self.queue] if self.queue else Q.default_queue + return Q.TaskConsumer(connection, queues, **kwargs) + + +class PeriodicTask(Task): + """A periodic task is a task that adds itself to the + :setting:`CELERYBEAT_SCHEDULE` setting.""" + abstract = True + ignore_result = True + relative = False + options = None + compat = True + + def __init__(self): + if not hasattr(self, 'run_every'): + raise NotImplementedError( + 'Periodic tasks must have a run_every attribute') + self.run_every = maybe_schedule(self.run_every, self.relative) + super(PeriodicTask, self).__init__() + + @classmethod + def on_bound(cls, app): + app.conf.CELERYBEAT_SCHEDULE[cls.name] = { + 'task': cls.name, + 'schedule': cls.run_every, + 'args': (), + 'kwargs': {}, + 'options': cls.options or {}, + 'relative': cls.relative, + } + + +def task(*args, **kwargs): + """Deprecated decorator, please use :func:`celery.task`.""" + return current_app.task(*args, **dict({'accept_magic_kwargs': False, + 'base': Task}, **kwargs)) + + +def periodic_task(*args, **options): + """Deprecated decorator, please use :setting:`CELERYBEAT_SCHEDULE`.""" + return task(**dict({'base': PeriodicTask}, **options)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/task/http.py b/thesisenv/lib/python3.6/site-packages/celery/task/http.py new file mode 100644 index 0000000..e170ec3 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/task/http.py @@ -0,0 +1,220 @@ +# -*- coding: utf-8 -*- +""" + celery.task.http + ~~~~~~~~~~~~~~~~ + + Webhook task implementation. 
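The deprecated ``periodic_task`` decorator defined above registers its schedule when the module is imported; a minimal sketch (new projects should add an entry to :setting:`CELERYBEAT_SCHEDULE` instead):

    from celery.schedules import crontab
    from celery.task import periodic_task

    @periodic_task(run_every=crontab(minute=0, hour=4))
    def nightly_cleanup():
        pass    # body is a placeholder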
+ +""" +from __future__ import absolute_import + +import anyjson +import sys + +try: + from urllib.parse import parse_qsl, urlencode, urlparse # Py3 +except ImportError: # pragma: no cover + from urllib import urlencode # noqa + from urlparse import urlparse, parse_qsl # noqa + +from celery import shared_task, __version__ as celery_version +from celery.five import items, reraise +from celery.utils.log import get_task_logger + +__all__ = ['InvalidResponseError', 'RemoteExecuteError', 'UnknownStatusError', + 'HttpDispatch', 'dispatch', 'URL'] + +GET_METHODS = frozenset(['GET', 'HEAD']) +logger = get_task_logger(__name__) + + +if sys.version_info[0] == 3: # pragma: no cover + + from urllib.request import Request, urlopen + + def utf8dict(tup): + if not isinstance(tup, dict): + return dict(tup) + return tup + +else: + + from urllib2 import Request, urlopen # noqa + + def utf8dict(tup): # noqa + """With a dict's items() tuple return a new dict with any utf-8 + keys/values encoded.""" + return dict( + (k.encode('utf-8'), + v.encode('utf-8') if isinstance(v, unicode) else v) # noqa + for k, v in tup) + + +class InvalidResponseError(Exception): + """The remote server gave an invalid response.""" + + +class RemoteExecuteError(Exception): + """The remote task gave a custom error.""" + + +class UnknownStatusError(InvalidResponseError): + """The remote server gave an unknown status.""" + + +def extract_response(raw_response, loads=anyjson.loads): + """Extract the response text from a raw JSON response.""" + if not raw_response: + raise InvalidResponseError('Empty response') + try: + payload = loads(raw_response) + except ValueError as exc: + reraise(InvalidResponseError, InvalidResponseError( + str(exc)), sys.exc_info()[2]) + + status = payload['status'] + if status == 'success': + return payload['retval'] + elif status == 'failure': + raise RemoteExecuteError(payload.get('reason')) + else: + raise UnknownStatusError(str(status)) + + +class MutableURL(object): + """Object wrapping a Uniform Resource Locator. + + Supports editing the query parameter list. + You can convert the object back to a string, the query will be + properly urlencoded. + + Examples + + >>> url = URL('http://www.google.com:6580/foo/bar?x=3&y=4#foo') + >>> url.query + {'x': '3', 'y': '4'} + >>> str(url) + 'http://www.google.com:6580/foo/bar?y=4&x=3#foo' + >>> url.query['x'] = 10 + >>> url.query.update({'George': 'Costanza'}) + >>> str(url) + 'http://www.google.com:6580/foo/bar?y=4&x=10&George=Costanza#foo' + + """ + def __init__(self, url): + self.parts = urlparse(url) + self.query = dict(parse_qsl(self.parts[4])) + + def __str__(self): + scheme, netloc, path, params, query, fragment = self.parts + query = urlencode(utf8dict(items(self.query))) + components = [scheme + '://', netloc, path or '/', + ';{0}'.format(params) if params else '', + '?{0}'.format(query) if query else '', + '#{0}'.format(fragment) if fragment else ''] + return ''.join(c for c in components if c) + + def __repr__(self): + return '<{0}: {1}>'.format(type(self).__name__, self) + + +class HttpDispatch(object): + """Make task HTTP request and collect the task result. + + :param url: The URL to request. + :param method: HTTP method used. Currently supported methods are `GET` + and `POST`. + :param task_kwargs: Task keyword arguments. + :param logger: Logger used for user/system feedback. 
+ + """ + user_agent = 'celery/{version}'.format(version=celery_version) + timeout = 5 + + def __init__(self, url, method, task_kwargs, **kwargs): + self.url = url + self.method = method + self.task_kwargs = task_kwargs + self.logger = kwargs.get('logger') or logger + + def make_request(self, url, method, params): + """Perform HTTP request and return the response.""" + request = Request(url, params) + for key, val in items(self.http_headers): + request.add_header(key, val) + response = urlopen(request) # user catches errors. + return response.read() + + def dispatch(self): + """Dispatch callback and return result.""" + url = MutableURL(self.url) + params = None + if self.method in GET_METHODS: + url.query.update(self.task_kwargs) + else: + params = urlencode(utf8dict(items(self.task_kwargs))) + raw_response = self.make_request(str(url), self.method, params) + return extract_response(raw_response) + + @property + def http_headers(self): + headers = {'User-Agent': self.user_agent} + return headers + + +@shared_task(name='celery.http_dispatch', bind=True, + url=None, method=None, accept_magic_kwargs=False) +def dispatch(self, url=None, method='GET', **kwargs): + """Task dispatching to an URL. + + :keyword url: The URL location of the HTTP callback task. + :keyword method: Method to use when dispatching the callback. Usually + `GET` or `POST`. + :keyword \*\*kwargs: Keyword arguments to pass on to the HTTP callback. + + .. attribute:: url + + If this is set, this is used as the default URL for requests. + Default is to require the user of the task to supply the url as an + argument, as this attribute is intended for subclasses. + + .. attribute:: method + + If this is set, this is the default method used for requests. + Default is to require the user of the task to supply the method as an + argument, as this attribute is intended for subclasses. + + """ + return HttpDispatch( + url or self.url, method or self.method, kwargs, + ).dispatch() + + +class URL(MutableURL): + """HTTP Callback URL + + Supports requesting an URL asynchronously. + + :param url: URL to request. + :keyword dispatcher: Class used to dispatch the request. + By default this is :func:`dispatch`. + + """ + dispatcher = None + + def __init__(self, url, dispatcher=None, app=None): + super(URL, self).__init__(url) + self.app = app + self.dispatcher = dispatcher or self.dispatcher + if self.dispatcher is None: + # Get default dispatcher + self.dispatcher = ( + self.app.tasks['celery.http_dispatch'] if self.app + else dispatch + ) + + def get_async(self, **kwargs): + return self.dispatcher.delay(str(self), 'GET', **kwargs) + + def post_async(self, **kwargs): + return self.dispatcher.delay(str(self), 'POST', **kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/celery/task/sets.py b/thesisenv/lib/python3.6/site-packages/celery/task/sets.py new file mode 100644 index 0000000..e277b79 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/task/sets.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +""" + celery.task.sets + ~~~~~~~~~~~~~~~~ + + Old ``group`` implementation, this module should + not be used anymore use :func:`celery.group` instead. 
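Client code reaches the webhook task through the ``URL`` helper above; for example (the endpoint is made up and must reply with the JSON envelope that ``extract_response`` expects):

    from celery.task.http import URL

    res = URL('http://example.com/multiply').get_async(x=10, y=10)
    res.get()   # -> 100 if the remote side answers {"status": "success", "retval": 100}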
+ +""" +from __future__ import absolute_import + +from celery._state import get_current_worker_task +from celery.app import app_or_default +from celery.canvas import maybe_signature # noqa +from celery.utils import uuid, warn_deprecated + +from celery.canvas import subtask # noqa + +warn_deprecated( + 'celery.task.sets and TaskSet', removal='4.0', + alternative="""\ +Please use "group" instead (see the Canvas section in the userguide)\ +""") + + +class TaskSet(list): + """A task containing several subtasks, making it possible + to track how many, or when all of the tasks have been completed. + + :param tasks: A list of :class:`subtask` instances. + + Example:: + + >>> from myproj.tasks import refresh_feed + + >>> urls = ('http://cnn.com/rss', 'http://bbc.co.uk/rss') + >>> s = TaskSet(refresh_feed.s(url) for url in urls) + >>> taskset_result = s.apply_async() + >>> list_of_return_values = taskset_result.join() # *expensive* + + """ + app = None + + def __init__(self, tasks=None, app=None, Publisher=None): + self.app = app_or_default(app or self.app) + super(TaskSet, self).__init__( + maybe_signature(t, app=self.app) for t in tasks or [] + ) + self.Publisher = Publisher or self.app.amqp.TaskProducer + self.total = len(self) # XXX compat + + def apply_async(self, connection=None, publisher=None, taskset_id=None): + """Apply TaskSet.""" + app = self.app + + if app.conf.CELERY_ALWAYS_EAGER: + return self.apply(taskset_id=taskset_id) + + with app.connection_or_acquire(connection) as conn: + setid = taskset_id or uuid() + pub = publisher or self.Publisher(conn) + results = self._async_results(setid, pub) + + result = app.TaskSetResult(setid, results) + parent = get_current_worker_task() + if parent: + parent.add_trail(result) + return result + + def _async_results(self, taskset_id, publisher): + return [task.apply_async(taskset_id=taskset_id, publisher=publisher) + for task in self] + + def apply(self, taskset_id=None): + """Applies the TaskSet locally by blocking until all tasks return.""" + setid = taskset_id or uuid() + return self.app.TaskSetResult(setid, self._sync_results(setid)) + + def _sync_results(self, taskset_id): + return [task.apply(taskset_id=taskset_id) for task in self] + + @property + def tasks(self): + return self + + @tasks.setter # noqa + def tasks(self, tasks): + self[:] = tasks diff --git a/thesisenv/lib/python3.6/site-packages/celery/task/trace.py b/thesisenv/lib/python3.6/site-packages/celery/task/trace.py new file mode 100644 index 0000000..43f19cb --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/task/trace.py @@ -0,0 +1,12 @@ +"""This module has moved to celery.app.trace.""" +from __future__ import absolute_import + +import sys + +from celery.app import trace +from celery.utils import warn_deprecated + +warn_deprecated('celery.task.trace', removal='3.2', + alternative='Please use celery.app.trace instead.') + +sys.modules[__name__] = trace diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/__init__.py new file mode 100644 index 0000000..9667872 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/__init__.py @@ -0,0 +1,87 @@ +from __future__ import absolute_import + +import logging +import os +import sys +import warnings + +from importlib import import_module + +try: + WindowsError = WindowsError # noqa +except NameError: + + class WindowsError(Exception): + pass + + +def setup(): + os.environ.update( + # warn if config module not found + C_WNOCONF='yes', + 
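As the deprecation warning above suggests, the same fan-out is written with ``group`` in current code:

    from celery import group
    from myproj.tasks import refresh_feed   # same hypothetical task as in the docstring

    urls = ('http://cnn.com/rss', 'http://bbc.co.uk/rss')
    result = group(refresh_feed.s(url) for url in urls).apply_async()
    result.join()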
KOMBU_DISABLE_LIMIT_PROTECTION='yes', + ) + + if os.environ.get('COVER_ALL_MODULES') or '--with-coverage' in sys.argv: + from warnings import catch_warnings + with catch_warnings(record=True): + import_all_modules() + warnings.resetwarnings() + from celery.tests.case import Trap + from celery._state import set_default_app + set_default_app(Trap()) + + +def teardown(): + # Don't want SUBDEBUG log messages at finalization. + try: + from multiprocessing.util import get_logger + except ImportError: + pass + else: + get_logger().setLevel(logging.WARNING) + + # Make sure test database is removed. + import os + if os.path.exists('test.db'): + try: + os.remove('test.db') + except WindowsError: + pass + + # Make sure there are no remaining threads at shutdown. + import threading + remaining_threads = [thread for thread in threading.enumerate() + if thread.getName() != 'MainThread'] + if remaining_threads: + sys.stderr.write( + '\n\n**WARNING**: Remaining threads at teardown: %r...\n' % ( + remaining_threads)) + + +def find_distribution_modules(name=__name__, file=__file__): + current_dist_depth = len(name.split('.')) - 1 + current_dist = os.path.join(os.path.dirname(file), + *([os.pardir] * current_dist_depth)) + abs = os.path.abspath(current_dist) + dist_name = os.path.basename(abs) + + for dirpath, dirnames, filenames in os.walk(abs): + package = (dist_name + dirpath[len(abs):]).replace('/', '.') + if '__init__.py' in filenames: + yield package + for filename in filenames: + if filename.endswith('.py') and filename != '__init__.py': + yield '.'.join([package, filename])[:-3] + + +def import_all_modules(name=__name__, file=__file__, + skip=('celery.decorators', + 'celery.contrib.batches', + 'celery.task')): + for module in find_distribution_modules(name, file): + if not module.startswith(skip): + try: + import_module(module) + except ImportError: + pass diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_amqp.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_amqp.py new file mode 100644 index 0000000..efb398a --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_amqp.py @@ -0,0 +1,228 @@ +from __future__ import absolute_import + +import datetime + +import pytz + +from kombu import Exchange, Queue + +from celery.app.amqp import Queues, TaskPublisher +from celery.five import keys +from celery.tests.case import AppCase, Mock + + +class test_TaskProducer(AppCase): + + def test__exit__(self): + publisher = self.app.amqp.TaskProducer(self.app.connection()) + publisher.release = Mock() + with publisher: + pass + publisher.release.assert_called_with() + + def test_declare(self): + publisher = self.app.amqp.TaskProducer(self.app.connection()) + publisher.exchange.name = 'foo' + publisher.declare() + publisher.exchange.name = None + publisher.declare() + + def test_retry_policy(self): + prod = self.app.amqp.TaskProducer(Mock()) + prod.channel.connection.client.declared_entities = set() + prod.publish_task('tasks.add', (2, 2), {}, + retry_policy={'frobulate': 32.4}) + + def test_publish_no_retry(self): + prod = self.app.amqp.TaskProducer(Mock()) + prod.channel.connection.client.declared_entities = set() + prod.publish_task('tasks.add', (2, 2), {}, retry=False, chord=123) + self.assertFalse(prod.connection.ensure.call_count) + + def 
test_publish_custom_queue(self): + prod = self.app.amqp.TaskProducer(Mock()) + self.app.amqp.queues['some_queue'] = Queue( + 'xxx', Exchange('yyy'), 'zzz', + ) + prod.channel.connection.client.declared_entities = set() + prod.publish = Mock() + prod.publish_task('tasks.add', (8, 8), {}, retry=False, + queue='some_queue') + self.assertEqual(prod.publish.call_args[1]['exchange'], 'yyy') + self.assertEqual(prod.publish.call_args[1]['routing_key'], 'zzz') + + def test_publish_with_countdown(self): + prod = self.app.amqp.TaskProducer(Mock()) + prod.channel.connection.client.declared_entities = set() + prod.publish = Mock() + now = datetime.datetime(2013, 11, 26, 16, 48, 46) + prod.publish_task('tasks.add', (1, 1), {}, retry=False, + countdown=10, now=now) + self.assertEqual( + prod.publish.call_args[0][0]['eta'], + '2013-11-26T16:48:56+00:00', + ) + + def test_publish_with_countdown_and_timezone(self): + # use timezone with fixed offset to be sure it won't be changed + self.app.conf.CELERY_TIMEZONE = pytz.FixedOffset(120) + prod = self.app.amqp.TaskProducer(Mock()) + prod.channel.connection.client.declared_entities = set() + prod.publish = Mock() + now = datetime.datetime(2013, 11, 26, 16, 48, 46) + prod.publish_task('tasks.add', (2, 2), {}, retry=False, + countdown=20, now=now) + self.assertEqual( + prod.publish.call_args[0][0]['eta'], + '2013-11-26T18:49:06+02:00', + ) + + def test_event_dispatcher(self): + prod = self.app.amqp.TaskProducer(Mock()) + self.assertTrue(prod.event_dispatcher) + self.assertFalse(prod.event_dispatcher.enabled) + + +class test_TaskConsumer(AppCase): + + def test_accept_content(self): + with self.app.pool.acquire(block=True) as conn: + self.app.conf.CELERY_ACCEPT_CONTENT = ['application/json'] + self.assertEqual( + self.app.amqp.TaskConsumer(conn).accept, + set(['application/json']) + ) + self.assertEqual( + self.app.amqp.TaskConsumer(conn, accept=['json']).accept, + set(['application/json']), + ) + + +class test_compat_TaskPublisher(AppCase): + + def test_compat_exchange_is_string(self): + producer = TaskPublisher(exchange='foo', app=self.app) + self.assertIsInstance(producer.exchange, Exchange) + self.assertEqual(producer.exchange.name, 'foo') + self.assertEqual(producer.exchange.type, 'direct') + producer = TaskPublisher(exchange='foo', exchange_type='topic', + app=self.app) + self.assertEqual(producer.exchange.type, 'topic') + + def test_compat_exchange_is_Exchange(self): + producer = TaskPublisher(exchange=Exchange('foo'), app=self.app) + self.assertEqual(producer.exchange.name, 'foo') + + +class test_PublisherPool(AppCase): + + def test_setup_nolimit(self): + self.app.conf.BROKER_POOL_LIMIT = None + try: + delattr(self.app, '_pool') + except AttributeError: + pass + self.app.amqp._producer_pool = None + pool = self.app.amqp.producer_pool + self.assertEqual(pool.limit, self.app.pool.limit) + self.assertFalse(pool._resource.queue) + + r1 = pool.acquire() + r2 = pool.acquire() + r1.release() + r2.release() + r1 = pool.acquire() + r2 = pool.acquire() + + def test_setup(self): + self.app.conf.BROKER_POOL_LIMIT = 2 + try: + delattr(self.app, '_pool') + except AttributeError: + pass + self.app.amqp._producer_pool = None + pool = self.app.amqp.producer_pool + self.assertEqual(pool.limit, self.app.pool.limit) + self.assertTrue(pool._resource.queue) + + p1 = r1 = pool.acquire() + p2 = r2 = pool.acquire() + r1.release() + r2.release() + r1 = pool.acquire() + r2 = pool.acquire() + self.assertIs(p2, r1) + self.assertIs(p1, r2) + r1.release() + r2.release() + + +class 
test_Queues(AppCase): + + def test_queues_format(self): + self.app.amqp.queues._consume_from = {} + self.assertEqual(self.app.amqp.queues.format(), '') + + def test_with_defaults(self): + self.assertEqual(Queues(None), {}) + + def test_add(self): + q = Queues() + q.add('foo', exchange='ex', routing_key='rk') + self.assertIn('foo', q) + self.assertIsInstance(q['foo'], Queue) + self.assertEqual(q['foo'].routing_key, 'rk') + + def test_with_ha_policy(self): + qn = Queues(ha_policy=None, create_missing=False) + qn.add('xyz') + self.assertIsNone(qn['xyz'].queue_arguments) + + qn.add('xyx', queue_arguments={'x-foo': 'bar'}) + self.assertEqual(qn['xyx'].queue_arguments, {'x-foo': 'bar'}) + + q = Queues(ha_policy='all', create_missing=False) + q.add(Queue('foo')) + self.assertEqual(q['foo'].queue_arguments, {'x-ha-policy': 'all'}) + + qq = Queue('xyx2', queue_arguments={'x-foo': 'bari'}) + q.add(qq) + self.assertEqual(q['xyx2'].queue_arguments, { + 'x-ha-policy': 'all', + 'x-foo': 'bari', + }) + + q2 = Queues(ha_policy=['A', 'B', 'C'], create_missing=False) + q2.add(Queue('foo')) + self.assertEqual(q2['foo'].queue_arguments, { + 'x-ha-policy': 'nodes', + 'x-ha-policy-params': ['A', 'B', 'C'], + }) + + def test_select_add(self): + q = Queues() + q.select(['foo', 'bar']) + q.select_add('baz') + self.assertItemsEqual(keys(q._consume_from), ['foo', 'bar', 'baz']) + + def test_deselect(self): + q = Queues() + q.select(['foo', 'bar']) + q.deselect('bar') + self.assertItemsEqual(keys(q._consume_from), ['foo']) + + def test_with_ha_policy_compat(self): + q = Queues(ha_policy='all') + q.add('bar') + self.assertEqual(q['bar'].queue_arguments, {'x-ha-policy': 'all'}) + + def test_add_default_exchange(self): + ex = Exchange('fff', 'fanout') + q = Queues(default_exchange=ex) + q.add(Queue('foo')) + self.assertEqual(q['foo'].exchange, ex) + + def test_alias(self): + q = Queues() + q.add(Queue('foo', alias='barfoo')) + self.assertIs(q['barfoo'], q['foo']) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_annotations.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_annotations.py new file mode 100644 index 0000000..559f5cb --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_annotations.py @@ -0,0 +1,56 @@ +from __future__ import absolute_import + +from celery.app.annotations import MapAnnotation, prepare +from celery.utils.imports import qualname + +from celery.tests.case import AppCase + + +class MyAnnotation(object): + foo = 65 + + +class AnnotationCase(AppCase): + + def setup(self): + @self.app.task(shared=False) + def add(x, y): + return x + y + self.add = add + + @self.app.task(shared=False) + def mul(x, y): + return x * y + self.mul = mul + + +class test_MapAnnotation(AnnotationCase): + + def test_annotate(self): + x = MapAnnotation({self.add.name: {'foo': 1}}) + self.assertDictEqual(x.annotate(self.add), {'foo': 1}) + self.assertIsNone(x.annotate(self.mul)) + + def test_annotate_any(self): + x = MapAnnotation({'*': {'foo': 2}}) + self.assertDictEqual(x.annotate_any(), {'foo': 2}) + + x = MapAnnotation() + self.assertIsNone(x.annotate_any()) + + +class test_prepare(AnnotationCase): + + def test_dict_to_MapAnnotation(self): + x = prepare({self.add.name: {'foo': 3}}) + self.assertIsInstance(x[0], MapAnnotation) + + def test_returns_list(self): + self.assertListEqual(prepare(1), [1]) + self.assertListEqual(prepare([1]), [1]) + self.assertListEqual(prepare((1, )), [1]) + self.assertEqual(prepare(None), ()) + + def test_evalutes_qualnames(self): 
+ self.assertEqual(prepare(qualname(MyAnnotation))[0]().foo, 65) + self.assertEqual(prepare([qualname(MyAnnotation)])[0]().foo, 65) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_app.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_app.py new file mode 100644 index 0000000..9d260c6 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_app.py @@ -0,0 +1,726 @@ +from __future__ import absolute_import + +import gc +import os +import itertools + +from copy import deepcopy +from pickle import loads, dumps + +from amqp import promise +from kombu import Exchange + +from celery import shared_task, current_app +from celery import app as _app +from celery import _state +from celery.app import base as _appbase +from celery.app import defaults +from celery.exceptions import ImproperlyConfigured +from celery.five import items +from celery.loaders.base import BaseLoader +from celery.platforms import pyimplementation +from celery.utils.serialization import pickle + +from celery.tests.case import ( + CELERY_TEST_CONFIG, + AppCase, + Mock, + depends_on_current_app, + mask_modules, + patch, + platform_pyimp, + sys_platform, + pypy_version, + with_environ, +) +from celery.utils import uuid +from celery.utils.mail import ErrorMail + +THIS_IS_A_KEY = 'this is a value' + + +class ObjectConfig(object): + FOO = 1 + BAR = 2 + +object_config = ObjectConfig() +dict_config = dict(FOO=10, BAR=20) + + +class ObjectConfig2(object): + LEAVE_FOR_WORK = True + MOMENT_TO_STOP = True + CALL_ME_BACK = 123456789 + WANT_ME_TO = False + UNDERSTAND_ME = True + + +class Object(object): + + def __init__(self, **kwargs): + for key, value in items(kwargs): + setattr(self, key, value) + + +def _get_test_config(): + return deepcopy(CELERY_TEST_CONFIG) +test_config = _get_test_config() + + +class test_module(AppCase): + + def test_default_app(self): + self.assertEqual(_app.default_app, _state.default_app) + + def test_bugreport(self): + self.assertTrue(_app.bugreport(app=self.app)) + + +class test_App(AppCase): + + def setup(self): + self.app.add_defaults(test_config) + + def test_task_autofinalize_disabled(self): + with self.Celery('xyzibari', autofinalize=False) as app: + @app.task + def ttafd(): + return 42 + + with self.assertRaises(RuntimeError): + ttafd() + + with self.Celery('xyzibari', autofinalize=False) as app: + @app.task + def ttafd2(): + return 42 + + app.finalize() + self.assertEqual(ttafd2(), 42) + + def test_registry_autofinalize_disabled(self): + with self.Celery('xyzibari', autofinalize=False) as app: + with self.assertRaises(RuntimeError): + app.tasks['celery.chain'] + app.finalize() + self.assertTrue(app.tasks['celery.chain']) + + def test_task(self): + with self.Celery('foozibari') as app: + + def fun(): + pass + + fun.__module__ = '__main__' + task = app.task(fun) + self.assertEqual(task.name, app.main + '.fun') + + def test_with_config_source(self): + with self.Celery(config_source=ObjectConfig) as app: + self.assertEqual(app.conf.FOO, 1) + self.assertEqual(app.conf.BAR, 2) + + @depends_on_current_app + def test_task_windows_execv(self): + prev, _appbase._EXECV = _appbase._EXECV, True + try: + + @self.app.task(shared=False) + def foo(): + pass + + self.assertTrue(foo._get_current_object()) # is proxy + + finally: + _appbase._EXECV = prev + assert not _appbase._EXECV + + def test_task_takes_no_args(self): + with self.assertRaises(TypeError): + @self.app.task(1) + def foo(): + pass + + def test_add_defaults(self): + 
self.assertFalse(self.app.configured) + _conf = {'FOO': 300} + + def conf(): + return _conf + + self.app.add_defaults(conf) + self.assertIn(conf, self.app._pending_defaults) + self.assertFalse(self.app.configured) + self.assertEqual(self.app.conf.FOO, 300) + self.assertTrue(self.app.configured) + self.assertFalse(self.app._pending_defaults) + + # defaults not pickled + appr = loads(dumps(self.app)) + with self.assertRaises(AttributeError): + appr.conf.FOO + + # add more defaults after configured + conf2 = {'FOO': 'BAR'} + self.app.add_defaults(conf2) + self.assertEqual(self.app.conf.FOO, 'BAR') + + self.assertIn(_conf, self.app.conf.defaults) + self.assertIn(conf2, self.app.conf.defaults) + + def test_connection_or_acquire(self): + with self.app.connection_or_acquire(block=True): + self.assertTrue(self.app.pool._dirty) + + with self.app.connection_or_acquire(pool=False): + self.assertFalse(self.app.pool._dirty) + + def test_maybe_close_pool(self): + cpool = self.app._pool = Mock() + amqp = self.app.__dict__['amqp'] = Mock() + ppool = amqp._producer_pool + self.app._maybe_close_pool() + cpool.force_close_all.assert_called_with() + ppool.force_close_all.assert_called_with() + self.assertIsNone(self.app._pool) + self.assertIsNone(self.app.__dict__['amqp']._producer_pool) + + self.app._pool = Mock() + self.app._maybe_close_pool() + self.app._maybe_close_pool() + + def test_using_v1_reduce(self): + self.app._using_v1_reduce = True + self.assertTrue(loads(dumps(self.app))) + + def test_autodiscover_tasks_force(self): + self.app.loader.autodiscover_tasks = Mock() + self.app.autodiscover_tasks(['proj.A', 'proj.B'], force=True) + self.app.loader.autodiscover_tasks.assert_called_with( + ['proj.A', 'proj.B'], 'tasks', + ) + self.app.loader.autodiscover_tasks = Mock() + self.app.autodiscover_tasks( + lambda: ['proj.A', 'proj.B'], + related_name='george', + force=True, + ) + self.app.loader.autodiscover_tasks.assert_called_with( + ['proj.A', 'proj.B'], 'george', + ) + + def test_autodiscover_tasks_lazy(self): + with patch('celery.signals.import_modules') as import_modules: + + def packages(): + return [1, 2, 3] + + self.app.autodiscover_tasks(packages) + self.assertTrue(import_modules.connect.called) + prom = import_modules.connect.call_args[0][0] + self.assertIsInstance(prom, promise) + self.assertEqual(prom.fun, self.app._autodiscover_tasks) + self.assertEqual(prom.args[0](), [1, 2, 3]) + + @with_environ('CELERY_BROKER_URL', '') + def test_with_broker(self): + with self.Celery(broker='foo://baribaz') as app: + self.assertEqual(app.conf.BROKER_URL, 'foo://baribaz') + + def test_repr(self): + self.assertTrue(repr(self.app)) + + def test_custom_task_registry(self): + with self.Celery(tasks=self.app.tasks) as app2: + self.assertIs(app2.tasks, self.app.tasks) + + def test_include_argument(self): + with self.Celery(include=('foo', 'bar.foo')) as app: + self.assertEqual(app.conf.CELERY_IMPORTS, ('foo', 'bar.foo')) + + def test_set_as_current(self): + current = _state._tls.current_app + try: + app = self.Celery(set_as_current=True) + self.assertIs(_state._tls.current_app, app) + finally: + _state._tls.current_app = current + + def test_current_task(self): + @self.app.task + def foo(shared=False): + pass + + _state._task_stack.push(foo) + try: + self.assertEqual(self.app.current_task.name, foo.name) + finally: + _state._task_stack.pop() + + def test_task_not_shared(self): + with patch('celery.app.base.connect_on_app_finalize') as sh: + @self.app.task(shared=False) + def foo(): + pass + 
self.assertFalse(sh.called) + + def test_task_compat_with_filter(self): + with self.Celery(accept_magic_kwargs=True) as app: + check = Mock() + + def filter(task): + check(task) + return task + + @app.task(filter=filter, shared=False) + def foo(): + pass + check.assert_called_with(foo) + + def test_task_with_filter(self): + with self.Celery(accept_magic_kwargs=False) as app: + check = Mock() + + def filter(task): + check(task) + return task + + assert not _appbase._EXECV + + @app.task(filter=filter, shared=False) + def foo(): + pass + check.assert_called_with(foo) + + def test_task_sets_main_name_MP_MAIN_FILE(self): + from celery import utils as _utils + _utils.MP_MAIN_FILE = __file__ + try: + with self.Celery('xuzzy') as app: + + @app.task + def foo(): + pass + + self.assertEqual(foo.name, 'xuzzy.foo') + finally: + _utils.MP_MAIN_FILE = None + + def test_annotate_decorator(self): + from celery.app.task import Task + + class adX(Task): + abstract = True + + def run(self, y, z, x): + return y, z, x + + check = Mock() + + def deco(fun): + + def _inner(*args, **kwargs): + check(*args, **kwargs) + return fun(*args, **kwargs) + return _inner + + self.app.conf.CELERY_ANNOTATIONS = { + adX.name: {'@__call__': deco} + } + adX.bind(self.app) + self.assertIs(adX.app, self.app) + + i = adX() + i(2, 4, x=3) + check.assert_called_with(i, 2, 4, x=3) + + i.annotate() + i.annotate() + + def test_apply_async_has__self__(self): + @self.app.task(__self__='hello', shared=False) + def aawsX(): + pass + + with patch('celery.app.amqp.TaskProducer.publish_task') as dt: + aawsX.apply_async((4, 5)) + args = dt.call_args[0][1] + self.assertEqual(args, ('hello', 4, 5)) + + def test_apply_async_adds_children(self): + from celery._state import _task_stack + + @self.app.task(shared=False) + def a3cX1(self): + pass + + @self.app.task(shared=False) + def a3cX2(self): + pass + + _task_stack.push(a3cX1) + try: + a3cX1.push_request(called_directly=False) + try: + res = a3cX2.apply_async(add_to_parent=True) + self.assertIn(res, a3cX1.request.children) + finally: + a3cX1.pop_request() + finally: + _task_stack.pop() + + def test_pickle_app(self): + changes = dict(THE_FOO_BAR='bars', + THE_MII_MAR='jars') + self.app.conf.update(changes) + saved = pickle.dumps(self.app) + self.assertLess(len(saved), 2048) + restored = pickle.loads(saved) + self.assertDictContainsSubset(changes, restored.conf) + + def test_worker_main(self): + from celery.bin import worker as worker_bin + + class worker(worker_bin.worker): + + def execute_from_commandline(self, argv): + return argv + + prev, worker_bin.worker = worker_bin.worker, worker + try: + ret = self.app.worker_main(argv=['--version']) + self.assertListEqual(ret, ['--version']) + finally: + worker_bin.worker = prev + + def test_config_from_envvar(self): + os.environ['CELERYTEST_CONFIG_OBJECT'] = 'celery.tests.app.test_app' + self.app.config_from_envvar('CELERYTEST_CONFIG_OBJECT') + self.assertEqual(self.app.conf.THIS_IS_A_KEY, 'this is a value') + + def assert_config2(self): + self.assertTrue(self.app.conf.LEAVE_FOR_WORK) + self.assertTrue(self.app.conf.MOMENT_TO_STOP) + self.assertEqual(self.app.conf.CALL_ME_BACK, 123456789) + self.assertFalse(self.app.conf.WANT_ME_TO) + self.assertTrue(self.app.conf.UNDERSTAND_ME) + + def test_config_from_object__lazy(self): + conf = ObjectConfig2() + self.app.config_from_object(conf) + self.assertFalse(self.app.loader._conf) + self.assertIs(self.app._config_source, conf) + + self.assert_config2() + + def test_config_from_object__force(self): + 
self.app.config_from_object(ObjectConfig2(), force=True) + self.assertTrue(self.app.loader._conf) + + self.assert_config2() + + def test_config_from_cmdline(self): + cmdline = ['.always_eager=no', + '.result_backend=/dev/null', + 'celeryd.prefetch_multiplier=368', + '.foobarstring=(string)300', + '.foobarint=(int)300', + '.result_engine_options=(dict){"foo": "bar"}'] + self.app.config_from_cmdline(cmdline, namespace='celery') + self.assertFalse(self.app.conf.CELERY_ALWAYS_EAGER) + self.assertEqual(self.app.conf.CELERY_RESULT_BACKEND, '/dev/null') + self.assertEqual(self.app.conf.CELERYD_PREFETCH_MULTIPLIER, 368) + self.assertEqual(self.app.conf.CELERY_FOOBARSTRING, '300') + self.assertEqual(self.app.conf.CELERY_FOOBARINT, 300) + self.assertDictEqual(self.app.conf.CELERY_RESULT_ENGINE_OPTIONS, + {'foo': 'bar'}) + + def test_compat_setting_CELERY_BACKEND(self): + self.app._preconf = {} # removes result backend set by AppCase + self.app.config_from_object(Object(CELERY_BACKEND='set_by_us')) + self.assertEqual(self.app.conf.CELERY_RESULT_BACKEND, 'set_by_us') + + def test_setting_BROKER_TRANSPORT_OPTIONS(self): + + _args = {'foo': 'bar', 'spam': 'baz'} + + self.app.config_from_object(Object()) + self.assertEqual(self.app.conf.BROKER_TRANSPORT_OPTIONS, {}) + + self.app.config_from_object(Object(BROKER_TRANSPORT_OPTIONS=_args)) + self.assertEqual(self.app.conf.BROKER_TRANSPORT_OPTIONS, _args) + + def test_Windows_log_color_disabled(self): + self.app.IS_WINDOWS = True + self.assertFalse(self.app.log.supports_color(True)) + + def test_compat_setting_CARROT_BACKEND(self): + self.app.config_from_object(Object(CARROT_BACKEND='set_by_us')) + self.assertEqual(self.app.conf.BROKER_TRANSPORT, 'set_by_us') + + def test_WorkController(self): + x = self.app.WorkController + self.assertIs(x.app, self.app) + + def test_Worker(self): + x = self.app.Worker + self.assertIs(x.app, self.app) + + @depends_on_current_app + def test_AsyncResult(self): + x = self.app.AsyncResult('1') + self.assertIs(x.app, self.app) + r = loads(dumps(x)) + # not set as current, so ends up as default app after reduce + self.assertIs(r.app, current_app._get_current_object()) + + def test_get_active_apps(self): + self.assertTrue(list(_state._get_active_apps())) + + app1 = self.Celery() + appid = id(app1) + self.assertIn(app1, _state._get_active_apps()) + app1.close() + del(app1) + + gc.collect() + + # weakref removed from list when app goes out of scope. 
+ with self.assertRaises(StopIteration): + next(app for app in _state._get_active_apps() if id(app) == appid) + + def test_config_from_envvar_more(self, key='CELERY_HARNESS_CFG1'): + self.assertFalse( + self.app.config_from_envvar( + 'HDSAJIHWIQHEWQU', force=True, silent=True), + ) + with self.assertRaises(ImproperlyConfigured): + self.app.config_from_envvar( + 'HDSAJIHWIQHEWQU', force=True, silent=False, + ) + os.environ[key] = __name__ + '.object_config' + self.assertTrue(self.app.config_from_envvar(key, force=True)) + self.assertEqual(self.app.conf['FOO'], 1) + self.assertEqual(self.app.conf['BAR'], 2) + + os.environ[key] = 'unknown_asdwqe.asdwqewqe' + with self.assertRaises(ImportError): + self.app.config_from_envvar(key, silent=False) + self.assertFalse( + self.app.config_from_envvar(key, force=True, silent=True), + ) + + os.environ[key] = __name__ + '.dict_config' + self.assertTrue(self.app.config_from_envvar(key, force=True)) + self.assertEqual(self.app.conf['FOO'], 10) + self.assertEqual(self.app.conf['BAR'], 20) + + @patch('celery.bin.celery.CeleryCommand.execute_from_commandline') + def test_start(self, execute): + self.app.start() + self.assertTrue(execute.called) + + def test_mail_admins(self): + + class Loader(BaseLoader): + + def mail_admins(*args, **kwargs): + return args, kwargs + + self.app.loader = Loader(app=self.app) + self.app.conf.ADMINS = None + self.assertFalse(self.app.mail_admins('Subject', 'Body')) + self.app.conf.ADMINS = [('George Costanza', 'george@vandelay.com')] + self.assertTrue(self.app.mail_admins('Subject', 'Body')) + + def test_amqp_get_broker_info(self): + self.assertDictContainsSubset( + {'hostname': 'localhost', + 'userid': 'guest', + 'password': 'guest', + 'virtual_host': '/'}, + self.app.connection('pyamqp://').info(), + ) + self.app.conf.BROKER_PORT = 1978 + self.app.conf.BROKER_VHOST = 'foo' + self.assertDictContainsSubset( + {'port': 1978, 'virtual_host': 'foo'}, + self.app.connection('pyamqp://:1978/foo').info(), + ) + conn = self.app.connection('pyamqp:////value') + self.assertDictContainsSubset({'virtual_host': '/value'}, + conn.info()) + + def test_amqp_failover_strategy_selection(self): + # Test passing in a string and make sure the string + # gets there untouched + self.app.conf.BROKER_FAILOVER_STRATEGY = 'foo-bar' + self.assertEqual( + self.app.connection('amqp:////value').failover_strategy, + 'foo-bar', + ) + + # Try passing in None + self.app.conf.BROKER_FAILOVER_STRATEGY = None + self.assertEqual( + self.app.connection('amqp:////value').failover_strategy, + itertools.cycle, + ) + + # Test passing in a method + def my_failover_strategy(it): + yield True + + self.app.conf.BROKER_FAILOVER_STRATEGY = my_failover_strategy + self.assertEqual( + self.app.connection('amqp:////value').failover_strategy, + my_failover_strategy, + ) + + def test_BROKER_BACKEND_alias(self): + self.assertEqual(self.app.conf.BROKER_BACKEND, + self.app.conf.BROKER_TRANSPORT) + + def test_after_fork(self): + p = self.app._pool = Mock() + self.app._after_fork(self.app) + p.force_close_all.assert_called_with() + self.assertIsNone(self.app._pool) + self.app._after_fork(self.app) + + def test_pool_no_multiprocessing(self): + with mask_modules('multiprocessing.util'): + pool = self.app.pool + self.assertIs(pool, self.app._pool) + + def test_bugreport(self): + self.assertTrue(self.app.bugreport()) + + def test_send_task_sent_event(self): + + class Dispatcher(object): + sent = [] + + def publish(self, type, fields, *args, **kwargs): + self.sent.append((type, fields)) + + 
conn = self.app.connection() + chan = conn.channel() + try: + for e in ('foo_exchange', 'moo_exchange', 'bar_exchange'): + chan.exchange_declare(e, 'direct', durable=True) + chan.queue_declare(e, durable=True) + chan.queue_bind(e, e, e) + finally: + chan.close() + assert conn.transport_cls == 'memory' + + prod = self.app.amqp.TaskProducer( + conn, exchange=Exchange('foo_exchange'), + send_sent_event=True, + ) + + dispatcher = Dispatcher() + self.assertTrue(prod.publish_task('footask', (), {}, + exchange='moo_exchange', + routing_key='moo_exchange', + event_dispatcher=dispatcher)) + self.assertTrue(dispatcher.sent) + self.assertEqual(dispatcher.sent[0][0], 'task-sent') + self.assertTrue(prod.publish_task('footask', (), {}, + event_dispatcher=dispatcher, + exchange='bar_exchange', + routing_key='bar_exchange')) + + def test_error_mail_sender(self): + x = ErrorMail.subject % {'name': 'task_name', + 'id': uuid(), + 'exc': 'FOOBARBAZ', + 'hostname': 'lana'} + self.assertTrue(x) + + def test_error_mail_disabled(self): + task = Mock() + x = ErrorMail(task) + x.should_send = Mock() + x.should_send.return_value = False + x.send(Mock(), Mock()) + self.assertFalse(task.app.mail_admins.called) + + +class test_defaults(AppCase): + + def test_strtobool(self): + for s in ('false', 'no', '0'): + self.assertFalse(defaults.strtobool(s)) + for s in ('true', 'yes', '1'): + self.assertTrue(defaults.strtobool(s)) + with self.assertRaises(TypeError): + defaults.strtobool('unsure') + + +class test_debugging_utils(AppCase): + + def test_enable_disable_trace(self): + try: + _app.enable_trace() + self.assertEqual(_app.app_or_default, _app._app_or_default_trace) + _app.disable_trace() + self.assertEqual(_app.app_or_default, _app._app_or_default) + finally: + _app.disable_trace() + + +class test_pyimplementation(AppCase): + + def test_platform_python_implementation(self): + with platform_pyimp(lambda: 'Xython'): + self.assertEqual(pyimplementation(), 'Xython') + + def test_platform_jython(self): + with platform_pyimp(): + with sys_platform('java 1.6.51'): + self.assertIn('Jython', pyimplementation()) + + def test_platform_pypy(self): + with platform_pyimp(): + with sys_platform('darwin'): + with pypy_version((1, 4, 3)): + self.assertIn('PyPy', pyimplementation()) + with pypy_version((1, 4, 3, 'a4')): + self.assertIn('PyPy', pyimplementation()) + + def test_platform_fallback(self): + with platform_pyimp(): + with sys_platform('darwin'): + with pypy_version(): + self.assertEqual('CPython', pyimplementation()) + + +class test_shared_task(AppCase): + + def test_registers_to_all_apps(self): + with self.Celery('xproj', set_as_current=True) as xproj: + xproj.finalize() + + @shared_task + def foo(): + return 42 + + @shared_task() + def bar(): + return 84 + + self.assertIs(foo.app, xproj) + self.assertIs(bar.app, xproj) + self.assertTrue(foo._get_current_object()) + + with self.Celery('yproj', set_as_current=True) as yproj: + self.assertIs(foo.app, yproj) + self.assertIs(bar.app, yproj) + + @shared_task() + def baz(): + return 168 + + self.assertIs(baz.app, yproj) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_beat.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_beat.py new file mode 100644 index 0000000..67e4f53 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_beat.py @@ -0,0 +1,539 @@ +from __future__ import absolute_import + +import errno + +from datetime import datetime, timedelta +from pickle import dumps, loads + +from celery import beat +from 
celery.five import keys, string_t +from celery.schedules import schedule +from celery.utils import uuid +from celery.tests.case import AppCase, Mock, SkipTest, call, patch + + +class Object(object): + pass + + +class MockShelve(dict): + closed = False + synced = False + + def close(self): + self.closed = True + + def sync(self): + self.synced = True + + +class MockService(object): + started = False + stopped = False + + def __init__(self, *args, **kwargs): + pass + + def start(self, **kwargs): + self.started = True + + def stop(self, **kwargs): + self.stopped = True + + +class test_ScheduleEntry(AppCase): + Entry = beat.ScheduleEntry + + def create_entry(self, **kwargs): + entry = dict( + name='celery.unittest.add', + schedule=timedelta(seconds=10), + args=(2, 2), + options={'routing_key': 'cpu'}, + app=self.app, + ) + return self.Entry(**dict(entry, **kwargs)) + + def test_next(self): + entry = self.create_entry(schedule=10) + self.assertTrue(entry.last_run_at) + self.assertIsInstance(entry.last_run_at, datetime) + self.assertEqual(entry.total_run_count, 0) + + next_run_at = entry.last_run_at + timedelta(seconds=10) + next_entry = entry.next(next_run_at) + self.assertGreaterEqual(next_entry.last_run_at, next_run_at) + self.assertEqual(next_entry.total_run_count, 1) + + def test_is_due(self): + entry = self.create_entry(schedule=timedelta(seconds=10)) + self.assertIs(entry.app, self.app) + self.assertIs(entry.schedule.app, self.app) + due1, next_time_to_run1 = entry.is_due() + self.assertFalse(due1) + self.assertGreater(next_time_to_run1, 9) + + next_run_at = entry.last_run_at - timedelta(seconds=10) + next_entry = entry.next(next_run_at) + due2, next_time_to_run2 = next_entry.is_due() + self.assertTrue(due2) + self.assertGreater(next_time_to_run2, 9) + + def test_repr(self): + entry = self.create_entry() + self.assertIn(' 1: + return s.sh + raise OSError() + opens.side_effect = effect + s.setup_schedule() + s._remove_db.assert_called_with() + + s._store = {'__version__': 1} + s.setup_schedule() + + s._store.clear = Mock() + op = s.persistence.open = Mock() + op.return_value = s._store + s._store['tz'] = 'FUNKY' + s.setup_schedule() + op.assert_called_with(s.schedule_filename, writeback=True) + s._store.clear.assert_called_with() + s._store['utc_enabled'] = False + s._store.clear = Mock() + s.setup_schedule() + s._store.clear.assert_called_with() + + def test_get_schedule(self): + s = create_persistent_scheduler()[0]( + schedule_filename='schedule', app=self.app, + ) + s._store = {'entries': {}} + s.schedule = {'foo': 'bar'} + self.assertDictEqual(s.schedule, {'foo': 'bar'}) + self.assertDictEqual(s._store['entries'], s.schedule) + + +class test_Service(AppCase): + + def get_service(self): + Scheduler, mock_shelve = create_persistent_scheduler() + return beat.Service(app=self.app, scheduler_cls=Scheduler), mock_shelve + + def test_pickleable(self): + s = beat.Service(app=self.app, scheduler_cls=Mock) + self.assertTrue(loads(dumps(s))) + + def test_start(self): + s, sh = self.get_service() + schedule = s.scheduler.schedule + self.assertIsInstance(schedule, dict) + self.assertIsInstance(s.scheduler, beat.Scheduler) + scheduled = list(schedule.keys()) + for task_name in keys(sh['entries']): + self.assertIn(task_name, scheduled) + + s.sync() + self.assertTrue(sh.closed) + self.assertTrue(sh.synced) + self.assertTrue(s._is_stopped.isSet()) + s.sync() + s.stop(wait=False) + self.assertTrue(s._is_shutdown.isSet()) + s.stop(wait=True) + self.assertTrue(s._is_shutdown.isSet()) + + p = 
s.scheduler._store + s.scheduler._store = None + try: + s.scheduler.sync() + finally: + s.scheduler._store = p + + def test_start_embedded_process(self): + s, sh = self.get_service() + s._is_shutdown.set() + s.start(embedded_process=True) + + def test_start_thread(self): + s, sh = self.get_service() + s._is_shutdown.set() + s.start(embedded_process=False) + + def test_start_tick_raises_exit_error(self): + s, sh = self.get_service() + s.scheduler.tick_raises_exit = True + s.start() + self.assertTrue(s._is_shutdown.isSet()) + + def test_start_manages_one_tick_before_shutdown(self): + s, sh = self.get_service() + s.scheduler.shutdown_service = s + s.start() + self.assertTrue(s._is_shutdown.isSet()) + + +class test_EmbeddedService(AppCase): + + def test_start_stop_process(self): + try: + import _multiprocessing # noqa + except ImportError: + raise SkipTest('multiprocessing not available') + + from billiard.process import Process + + s = beat.EmbeddedService(self.app) + self.assertIsInstance(s, Process) + self.assertIsInstance(s.service, beat.Service) + s.service = MockService() + + class _Popen(object): + terminated = False + + def terminate(self): + self.terminated = True + + with patch('celery.platforms.close_open_fds'): + s.run() + self.assertTrue(s.service.started) + + s._popen = _Popen() + s.stop() + self.assertTrue(s.service.stopped) + self.assertTrue(s._popen.terminated) + + def test_start_stop_threaded(self): + s = beat.EmbeddedService(self.app, thread=True) + from threading import Thread + self.assertIsInstance(s, Thread) + self.assertIsInstance(s.service, beat.Service) + s.service = MockService() + + s.run() + self.assertTrue(s.service.started) + + s.stop() + self.assertTrue(s.service.stopped) + + +class test_schedule(AppCase): + + def test_maybe_make_aware(self): + x = schedule(10, app=self.app) + x.utc_enabled = True + d = x.maybe_make_aware(datetime.utcnow()) + self.assertTrue(d.tzinfo) + x.utc_enabled = False + d2 = x.maybe_make_aware(datetime.utcnow()) + self.assertIsNone(d2.tzinfo) + + def test_to_local(self): + x = schedule(10, app=self.app) + x.utc_enabled = True + d = x.to_local(datetime.utcnow()) + self.assertIsNone(d.tzinfo) + x.utc_enabled = False + d = x.to_local(datetime.utcnow()) + self.assertTrue(d.tzinfo) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_builtins.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_builtins.py new file mode 100644 index 0000000..0d04a52 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_builtins.py @@ -0,0 +1,217 @@ +from __future__ import absolute_import + +from celery import group, chord +from celery.app import builtins +from celery.canvas import Signature +from celery.five import range +from celery._state import _task_stack +from celery.tests.case import AppCase, Mock, patch + + +class BuiltinsCase(AppCase): + + def setup(self): + @self.app.task(shared=False) + def xsum(x): + return sum(x) + self.xsum = xsum + + @self.app.task(shared=False) + def add(x, y): + return x + y + self.add = add + + +class test_backend_cleanup(BuiltinsCase): + + def test_run(self): + self.app.backend.cleanup = Mock() + self.app.backend.cleanup.__name__ = 'cleanup' + cleanup_task = builtins.add_backend_cleanup_task(self.app) + cleanup_task() + self.assertTrue(self.app.backend.cleanup.called) + + +class test_map(BuiltinsCase): + + def test_run(self): + + @self.app.task(shared=False) + def map_mul(x): + return x[0] * x[1] + + res = self.app.tasks['celery.map']( + map_mul, [(2, 2), (4, 4), (8, 
8)], + ) + self.assertEqual(res, [4, 16, 64]) + + +class test_starmap(BuiltinsCase): + + def test_run(self): + + @self.app.task(shared=False) + def smap_mul(x, y): + return x * y + + res = self.app.tasks['celery.starmap']( + smap_mul, [(2, 2), (4, 4), (8, 8)], + ) + self.assertEqual(res, [4, 16, 64]) + + +class test_chunks(BuiltinsCase): + + @patch('celery.canvas.chunks.apply_chunks') + def test_run(self, apply_chunks): + + @self.app.task(shared=False) + def chunks_mul(l): + return l + + self.app.tasks['celery.chunks']( + chunks_mul, [(2, 2), (4, 4), (8, 8)], 1, + ) + self.assertTrue(apply_chunks.called) + + +class test_group(BuiltinsCase): + + def setup(self): + self.task = builtins.add_group_task(self.app)() + super(test_group, self).setup() + + def test_apply_async_eager(self): + self.task.apply = Mock() + self.app.conf.CELERY_ALWAYS_EAGER = True + self.task.apply_async() + self.assertTrue(self.task.apply.called) + + def test_apply(self): + x = group([self.add.s(4, 4), self.add.s(8, 8)]) + x.name = self.task.name + res = x.apply() + self.assertEqual(res.get(), [8, 16]) + + def test_apply_async(self): + x = group([self.add.s(4, 4), self.add.s(8, 8)]) + x.apply_async() + + def test_apply_empty(self): + x = group(app=self.app) + x.apply() + res = x.apply_async() + self.assertTrue(res) + self.assertFalse(res.results) + + def test_apply_async_with_parent(self): + _task_stack.push(self.add) + try: + self.add.push_request(called_directly=False) + try: + assert not self.add.request.children + x = group([self.add.s(4, 4), self.add.s(8, 8)]) + res = x() + self.assertTrue(self.add.request.children) + self.assertIn(res, self.add.request.children) + self.assertEqual(len(self.add.request.children), 1) + finally: + self.add.pop_request() + finally: + _task_stack.pop() + + +class test_chain(BuiltinsCase): + + def setup(self): + BuiltinsCase.setup(self) + self.task = builtins.add_chain_task(self.app)() + + def test_apply_async(self): + c = self.add.s(2, 2) | self.add.s(4) | self.add.s(8) + result = c.apply_async() + self.assertTrue(result.parent) + self.assertTrue(result.parent.parent) + self.assertIsNone(result.parent.parent.parent) + + def test_group_to_chord(self): + c = ( + group(self.add.s(i, i) for i in range(5)) | + self.add.s(10) | + self.add.s(20) | + self.add.s(30) + ) + tasks, _ = c.type.prepare_steps((), c.tasks) + self.assertIsInstance(tasks[0], chord) + self.assertTrue(tasks[0].body.options['link']) + self.assertTrue(tasks[0].body.options['link'][0].options['link']) + + c2 = self.add.s(2, 2) | group(self.add.s(i, i) for i in range(10)) + tasks2, _ = c2.type.prepare_steps((), c2.tasks) + self.assertIsInstance(tasks2[1], group) + + def test_apply_options(self): + + class static(Signature): + + def clone(self, *args, **kwargs): + return self + + def s(*args, **kwargs): + return static(self.add, args, kwargs, type=self.add) + + c = s(2, 2) | s(4, 4) | s(8, 8) + r1 = c.apply_async(task_id='some_id') + self.assertEqual(r1.id, 'some_id') + + c.apply_async(group_id='some_group_id') + self.assertEqual(c.tasks[-1].options['group_id'], 'some_group_id') + + c.apply_async(chord='some_chord_id') + self.assertEqual(c.tasks[-1].options['chord'], 'some_chord_id') + + c.apply_async(link=[s(32)]) + self.assertListEqual(c.tasks[-1].options['link'], [s(32)]) + + c.apply_async(link_error=[s('error')]) + for task in c.tasks: + self.assertListEqual(task.options['link_error'], [s('error')]) + + +class test_chord(BuiltinsCase): + + def setup(self): + self.task = builtins.add_chord_task(self.app)() + 
super(test_chord, self).setup() + + def test_apply_async(self): + x = chord([self.add.s(i, i) for i in range(10)], body=self.xsum.s()) + r = x.apply_async() + self.assertTrue(r) + self.assertTrue(r.parent) + + def test_run_header_not_group(self): + self.task([self.add.s(i, i) for i in range(10)], self.xsum.s()) + + def test_forward_options(self): + body = self.xsum.s() + x = chord([self.add.s(i, i) for i in range(10)], body=body) + x._type = Mock() + x._type.app.conf.CELERY_ALWAYS_EAGER = False + x.apply_async(group_id='some_group_id') + self.assertTrue(x._type.called) + resbody = x._type.call_args[0][1] + self.assertEqual(resbody.options['group_id'], 'some_group_id') + x2 = chord([self.add.s(i, i) for i in range(10)], body=body) + x2._type = Mock() + x2._type.app.conf.CELERY_ALWAYS_EAGER = False + x2.apply_async(chord='some_chord_id') + self.assertTrue(x2._type.called) + resbody = x2._type.call_args[0][1] + self.assertEqual(resbody.options['chord'], 'some_chord_id') + + def test_apply_eager(self): + self.app.conf.CELERY_ALWAYS_EAGER = True + x = chord([self.add.s(i, i) for i in range(10)], body=self.xsum.s()) + r = x.apply_async() + self.assertEqual(r.get(), 90) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_celery.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_celery.py new file mode 100644 index 0000000..5088d35 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_celery.py @@ -0,0 +1,18 @@ +from __future__ import absolute_import +from celery.tests.case import AppCase + +import celery + + +class test_celery_package(AppCase): + + def test_version(self): + self.assertTrue(celery.VERSION) + self.assertGreaterEqual(len(celery.VERSION), 3) + celery.VERSION = (0, 3, 0) + self.assertGreaterEqual(celery.__version__.count('.'), 2) + + def test_meta(self): + for m in ('__author__', '__contact__', '__homepage__', + '__docformat__'): + self.assertTrue(getattr(celery, m, None)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_control.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_control.py new file mode 100644 index 0000000..7a05506 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_control.py @@ -0,0 +1,251 @@ +from __future__ import absolute_import + +from functools import wraps + +from kombu.pidbox import Mailbox + +from celery.app import control +from celery.exceptions import DuplicateNodenameWarning +from celery.utils import uuid +from celery.tests.case import AppCase + + +class MockMailbox(Mailbox): + sent = [] + + def _publish(self, command, *args, **kwargs): + self.__class__.sent.append(command) + + def close(self): + pass + + def _collect(self, *args, **kwargs): + pass + + +class Control(control.Control): + Mailbox = MockMailbox + + +def with_mock_broadcast(fun): + + @wraps(fun) + def _resets(*args, **kwargs): + MockMailbox.sent = [] + try: + return fun(*args, **kwargs) + finally: + MockMailbox.sent = [] + return _resets + + +class test_flatten_reply(AppCase): + + def test_flatten_reply(self): + reply = [ + {'foo@example.com': {'hello': 10}}, + {'foo@example.com': {'hello': 20}}, + {'bar@example.com': {'hello': 30}} + ] + with self.assertWarns(DuplicateNodenameWarning) as w: + nodes = control.flatten_reply(reply) + + self.assertIn( + 'Received multiple replies from node name: foo@example.com.', + str(w.warning) + ) + self.assertIn('foo@example.com', nodes) + self.assertIn('bar@example.com', nodes) + + +class test_inspect(AppCase): + + def 
setup(self): + self.c = Control(app=self.app) + self.prev, self.app.control = self.app.control, self.c + self.i = self.c.inspect() + + def test_prepare_reply(self): + self.assertDictEqual(self.i._prepare([{'w1': {'ok': 1}}, + {'w2': {'ok': 1}}]), + {'w1': {'ok': 1}, 'w2': {'ok': 1}}) + + i = self.c.inspect(destination='w1') + self.assertEqual(i._prepare([{'w1': {'ok': 1}}]), + {'ok': 1}) + + @with_mock_broadcast + def test_active(self): + self.i.active() + self.assertIn('dump_active', MockMailbox.sent) + + @with_mock_broadcast + def test_clock(self): + self.i.clock() + self.assertIn('clock', MockMailbox.sent) + + @with_mock_broadcast + def test_conf(self): + self.i.conf() + self.assertIn('dump_conf', MockMailbox.sent) + + @with_mock_broadcast + def test_hello(self): + self.i.hello('george@vandelay.com') + self.assertIn('hello', MockMailbox.sent) + + @with_mock_broadcast + def test_memsample(self): + self.i.memsample() + self.assertIn('memsample', MockMailbox.sent) + + @with_mock_broadcast + def test_memdump(self): + self.i.memdump() + self.assertIn('memdump', MockMailbox.sent) + + @with_mock_broadcast + def test_objgraph(self): + self.i.objgraph() + self.assertIn('objgraph', MockMailbox.sent) + + @with_mock_broadcast + def test_scheduled(self): + self.i.scheduled() + self.assertIn('dump_schedule', MockMailbox.sent) + + @with_mock_broadcast + def test_reserved(self): + self.i.reserved() + self.assertIn('dump_reserved', MockMailbox.sent) + + @with_mock_broadcast + def test_stats(self): + self.i.stats() + self.assertIn('stats', MockMailbox.sent) + + @with_mock_broadcast + def test_revoked(self): + self.i.revoked() + self.assertIn('dump_revoked', MockMailbox.sent) + + @with_mock_broadcast + def test_tasks(self): + self.i.registered() + self.assertIn('dump_tasks', MockMailbox.sent) + + @with_mock_broadcast + def test_ping(self): + self.i.ping() + self.assertIn('ping', MockMailbox.sent) + + @with_mock_broadcast + def test_active_queues(self): + self.i.active_queues() + self.assertIn('active_queues', MockMailbox.sent) + + @with_mock_broadcast + def test_report(self): + self.i.report() + self.assertIn('report', MockMailbox.sent) + + +class test_Broadcast(AppCase): + + def setup(self): + self.control = Control(app=self.app) + self.app.control = self.control + + @self.app.task(shared=False) + def mytask(): + pass + self.mytask = mytask + + def test_purge(self): + self.control.purge() + + @with_mock_broadcast + def test_broadcast(self): + self.control.broadcast('foobarbaz', arguments=[]) + self.assertIn('foobarbaz', MockMailbox.sent) + + @with_mock_broadcast + def test_broadcast_limit(self): + self.control.broadcast( + 'foobarbaz1', arguments=[], limit=None, destination=[1, 2, 3], + ) + self.assertIn('foobarbaz1', MockMailbox.sent) + + @with_mock_broadcast + def test_broadcast_validate(self): + with self.assertRaises(ValueError): + self.control.broadcast('foobarbaz2', + destination='foo') + + @with_mock_broadcast + def test_rate_limit(self): + self.control.rate_limit(self.mytask.name, '100/m') + self.assertIn('rate_limit', MockMailbox.sent) + + @with_mock_broadcast + def test_time_limit(self): + self.control.time_limit(self.mytask.name, soft=10, hard=20) + self.assertIn('time_limit', MockMailbox.sent) + + @with_mock_broadcast + def test_add_consumer(self): + self.control.add_consumer('foo') + self.assertIn('add_consumer', MockMailbox.sent) + + @with_mock_broadcast + def test_cancel_consumer(self): + self.control.cancel_consumer('foo') + self.assertIn('cancel_consumer', MockMailbox.sent) + + 
@with_mock_broadcast + def test_enable_events(self): + self.control.enable_events() + self.assertIn('enable_events', MockMailbox.sent) + + @with_mock_broadcast + def test_disable_events(self): + self.control.disable_events() + self.assertIn('disable_events', MockMailbox.sent) + + @with_mock_broadcast + def test_revoke(self): + self.control.revoke('foozbaaz') + self.assertIn('revoke', MockMailbox.sent) + + @with_mock_broadcast + def test_ping(self): + self.control.ping() + self.assertIn('ping', MockMailbox.sent) + + @with_mock_broadcast + def test_election(self): + self.control.election('some_id', 'topic', 'action') + self.assertIn('election', MockMailbox.sent) + + @with_mock_broadcast + def test_pool_grow(self): + self.control.pool_grow(2) + self.assertIn('pool_grow', MockMailbox.sent) + + @with_mock_broadcast + def test_pool_shrink(self): + self.control.pool_shrink(2) + self.assertIn('pool_shrink', MockMailbox.sent) + + @with_mock_broadcast + def test_revoke_from_result(self): + self.app.AsyncResult('foozbazzbar').revoke() + self.assertIn('revoke', MockMailbox.sent) + + @with_mock_broadcast + def test_revoke_from_resultset(self): + r = self.app.GroupResult(uuid(), + [self.app.AsyncResult(x) + for x in [uuid() for i in range(10)]]) + r.revoke() + self.assertIn('revoke', MockMailbox.sent) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_defaults.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_defaults.py new file mode 100644 index 0000000..bf87f80 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_defaults.py @@ -0,0 +1,60 @@ +from __future__ import absolute_import + +import sys + +from importlib import import_module + +from celery.app.defaults import NAMESPACES + +from celery.tests.case import ( + AppCase, Mock, patch, pypy_version, sys_platform, +) + + +class test_defaults(AppCase): + + def setup(self): + self._prev = sys.modules.pop('celery.app.defaults', None) + + def teardown(self): + if self._prev: + sys.modules['celery.app.defaults'] = self._prev + + def test_option_repr(self): + self.assertTrue(repr(NAMESPACES['BROKER']['URL'])) + + def test_any(self): + val = object() + self.assertIs(self.defaults.Option.typemap['any'](val), val) + + def test_default_pool_pypy_14(self): + with sys_platform('darwin'): + with pypy_version((1, 4, 0)): + self.assertEqual(self.defaults.DEFAULT_POOL, 'solo') + + def test_default_pool_pypy_15(self): + with sys_platform('darwin'): + with pypy_version((1, 5, 0)): + self.assertEqual(self.defaults.DEFAULT_POOL, 'prefork') + + def test_deprecated(self): + source = Mock() + source.CELERYD_LOG_LEVEL = 2 + with patch('celery.utils.warn_deprecated') as warn: + self.defaults.find_deprecated_settings(source) + self.assertTrue(warn.called) + + def test_default_pool_jython(self): + with sys_platform('java 1.6.51'): + self.assertEqual(self.defaults.DEFAULT_POOL, 'threads') + + def test_find(self): + find = self.defaults.find + + self.assertEqual(find('server_email')[2].default, 'celery@localhost') + self.assertEqual(find('default_queue')[2].default, 'celery') + self.assertEqual(find('celery_default_exchange')[2], 'celery') + + @property + def defaults(self): + return import_module('celery.app.defaults') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_exceptions.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_exceptions.py new file mode 100644 index 0000000..25d2b4e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_exceptions.py 
@@ -0,0 +1,35 @@ +from __future__ import absolute_import + +import pickle + +from datetime import datetime + +from celery.exceptions import Reject, Retry + +from celery.tests.case import AppCase + + +class test_Retry(AppCase): + + def test_when_datetime(self): + x = Retry('foo', KeyError(), when=datetime.utcnow()) + self.assertTrue(x.humanize()) + + def test_pickleable(self): + x = Retry('foo', KeyError(), when=datetime.utcnow()) + self.assertTrue(pickle.loads(pickle.dumps(x))) + + +class test_Reject(AppCase): + + def test_attrs(self): + x = Reject('foo', requeue=True) + self.assertEqual(x.reason, 'foo') + self.assertTrue(x.requeue) + + def test_repr(self): + self.assertTrue(repr(Reject('foo', True))) + + def test_pickleable(self): + x = Retry('foo', True) + self.assertTrue(pickle.loads(pickle.dumps(x))) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_loaders.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_loaders.py new file mode 100644 index 0000000..cc9fb55 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_loaders.py @@ -0,0 +1,275 @@ +from __future__ import absolute_import + +import os +import sys +import warnings + +from celery import loaders +from celery.exceptions import ( + NotConfigured, +) +from celery.loaders import base +from celery.loaders import default +from celery.loaders.app import AppLoader +from celery.utils.imports import NotAPackage +from celery.utils.mail import SendmailWarning + +from celery.tests.case import ( + AppCase, Case, Mock, depends_on_current_app, patch, with_environ, +) + + +class DummyLoader(base.BaseLoader): + + def read_configuration(self): + return {'foo': 'bar', 'CELERY_IMPORTS': ('os', 'sys')} + + +class test_loaders(AppCase): + + def test_get_loader_cls(self): + self.assertEqual(loaders.get_loader_cls('default'), + default.Loader) + + @depends_on_current_app + def test_current_loader(self): + with self.assertPendingDeprecation(): + self.assertIs(loaders.current_loader(), self.app.loader) + + @depends_on_current_app + def test_load_settings(self): + with self.assertPendingDeprecation(): + self.assertIs(loaders.load_settings(), self.app.conf) + + +class test_LoaderBase(AppCase): + message_options = {'subject': 'Subject', + 'body': 'Body', + 'sender': 'x@x.com', + 'to': 'y@x.com'} + server_options = {'host': 'smtp.x.com', + 'port': 1234, + 'user': 'x', + 'password': 'qwerty', + 'timeout': 3} + + def setup(self): + self.loader = DummyLoader(app=self.app) + + def test_handlers_pass(self): + self.loader.on_task_init('foo.task', 'feedface-cafebabe') + self.loader.on_worker_init() + + def test_now(self): + self.assertTrue(self.loader.now(utc=True)) + self.assertTrue(self.loader.now(utc=False)) + + def test_read_configuration_no_env(self): + self.assertDictEqual( + base.BaseLoader(app=self.app).read_configuration( + 'FOO_X_S_WE_WQ_Q_WE'), + {}, + ) + + def test_autodiscovery(self): + with patch('celery.loaders.base.autodiscover_tasks') as auto: + auto.return_value = [Mock()] + auto.return_value[0].__name__ = 'moo' + self.loader.autodiscover_tasks(['A', 'B']) + self.assertIn('moo', self.loader.task_modules) + self.loader.task_modules.discard('moo') + + def test_import_task_module(self): + self.assertEqual(sys, self.loader.import_task_module('sys')) + + def test_init_worker_process(self): + self.loader.on_worker_process_init() + m = self.loader.on_worker_process_init = Mock() + self.loader.init_worker_process() + m.assert_called_with() + + def test_config_from_object_module(self): + 
self.loader.import_from_cwd = Mock() + self.loader.config_from_object('module_name') + self.loader.import_from_cwd.assert_called_with('module_name') + + def test_conf_property(self): + self.assertEqual(self.loader.conf['foo'], 'bar') + self.assertEqual(self.loader._conf['foo'], 'bar') + self.assertEqual(self.loader.conf['foo'], 'bar') + + def test_import_default_modules(self): + def modnames(l): + return [m.__name__ for m in l] + self.app.conf.CELERY_IMPORTS = ('os', 'sys') + self.assertEqual( + sorted(modnames(self.loader.import_default_modules())), + sorted(modnames([os, sys])), + ) + + def test_import_from_cwd_custom_imp(self): + + def imp(module, package=None): + imp.called = True + imp.called = False + + self.loader.import_from_cwd('foo', imp=imp) + self.assertTrue(imp.called) + + @patch('celery.utils.mail.Mailer._send') + def test_mail_admins_errors(self, send): + send.side_effect = KeyError() + opts = dict(self.message_options, **self.server_options) + + with self.assertWarnsRegex(SendmailWarning, r'KeyError'): + self.loader.mail_admins(fail_silently=True, **opts) + + with self.assertRaises(KeyError): + self.loader.mail_admins(fail_silently=False, **opts) + + @patch('celery.utils.mail.Mailer._send') + def test_mail_admins(self, send): + opts = dict(self.message_options, **self.server_options) + self.loader.mail_admins(**opts) + self.assertTrue(send.call_args) + message = send.call_args[0][0] + self.assertEqual(message.to, [self.message_options['to']]) + self.assertEqual(message.subject, self.message_options['subject']) + self.assertEqual(message.sender, self.message_options['sender']) + self.assertEqual(message.body, self.message_options['body']) + + def test_mail_attribute(self): + from celery.utils import mail + loader = base.BaseLoader(app=self.app) + self.assertIs(loader.mail, mail) + + def test_cmdline_config_ValueError(self): + with self.assertRaises(ValueError): + self.loader.cmdline_config_parser(['broker.port=foobar']) + + +class test_DefaultLoader(AppCase): + + @patch('celery.loaders.base.find_module') + def test_read_configuration_not_a_package(self, find_module): + find_module.side_effect = NotAPackage() + l = default.Loader(app=self.app) + with self.assertRaises(NotAPackage): + l.read_configuration(fail_silently=False) + + @patch('celery.loaders.base.find_module') + @with_environ('CELERY_CONFIG_MODULE', 'celeryconfig.py') + def test_read_configuration_py_in_name(self, find_module): + find_module.side_effect = NotAPackage() + l = default.Loader(app=self.app) + with self.assertRaises(NotAPackage): + l.read_configuration(fail_silently=False) + + @patch('celery.loaders.base.find_module') + def test_read_configuration_importerror(self, find_module): + default.C_WNOCONF = True + find_module.side_effect = ImportError() + l = default.Loader(app=self.app) + with self.assertWarnsRegex(NotConfigured, r'make sure it exists'): + l.read_configuration(fail_silently=True) + default.C_WNOCONF = False + l.read_configuration(fail_silently=True) + + def test_read_configuration(self): + from types import ModuleType + + class ConfigModule(ModuleType): + pass + + configname = os.environ.get('CELERY_CONFIG_MODULE') or 'celeryconfig' + celeryconfig = ConfigModule(configname) + celeryconfig.CELERY_IMPORTS = ('os', 'sys') + + prevconfig = sys.modules.get(configname) + sys.modules[configname] = celeryconfig + try: + l = default.Loader(app=self.app) + l.find_module = Mock(name='find_module') + settings = l.read_configuration(fail_silently=False) + self.assertTupleEqual(settings.CELERY_IMPORTS, 
('os', 'sys')) + settings = l.read_configuration(fail_silently=False) + self.assertTupleEqual(settings.CELERY_IMPORTS, ('os', 'sys')) + l.on_worker_init() + finally: + if prevconfig: + sys.modules[configname] = prevconfig + + def test_import_from_cwd(self): + l = default.Loader(app=self.app) + old_path = list(sys.path) + try: + sys.path.remove(os.getcwd()) + except ValueError: + pass + celery = sys.modules.pop('celery', None) + sys.modules.pop('celery.five', None) + try: + self.assertTrue(l.import_from_cwd('celery')) + sys.modules.pop('celery', None) + sys.modules.pop('celery.five', None) + sys.path.insert(0, os.getcwd()) + self.assertTrue(l.import_from_cwd('celery')) + finally: + sys.path = old_path + sys.modules['celery'] = celery + + def test_unconfigured_settings(self): + context_executed = [False] + + class _Loader(default.Loader): + + def find_module(self, name): + raise ImportError(name) + + with warnings.catch_warnings(record=True): + l = _Loader(app=self.app) + self.assertFalse(l.configured) + context_executed[0] = True + self.assertTrue(context_executed[0]) + + +class test_AppLoader(AppCase): + + def setup(self): + self.loader = AppLoader(app=self.app) + + def test_on_worker_init(self): + self.app.conf.CELERY_IMPORTS = ('subprocess', ) + sys.modules.pop('subprocess', None) + self.loader.init_worker() + self.assertIn('subprocess', sys.modules) + + +class test_autodiscovery(Case): + + def test_autodiscover_tasks(self): + base._RACE_PROTECTION = True + try: + base.autodiscover_tasks(['foo']) + finally: + base._RACE_PROTECTION = False + with patch('celery.loaders.base.find_related_module') as frm: + base.autodiscover_tasks(['foo']) + self.assertTrue(frm.called) + + def test_find_related_module(self): + with patch('importlib.import_module') as imp: + with patch('imp.find_module') as find: + imp.return_value = Mock() + imp.return_value.__path__ = 'foo' + base.find_related_module(base, 'tasks') + + def se1(val): + imp.side_effect = AttributeError() + + imp.side_effect = se1 + base.find_related_module(base, 'tasks') + imp.side_effect = None + + find.side_effect = ImportError() + base.find_related_module(base, 'tasks') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_log.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_log.py new file mode 100644 index 0000000..588e39b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_log.py @@ -0,0 +1,385 @@ +from __future__ import absolute_import + +import sys +import logging + +from collections import defaultdict +from io import StringIO +from tempfile import mktemp + +from celery import signals +from celery.app.log import TaskFormatter +from celery.utils.log import LoggingProxy +from celery.utils import uuid +from celery.utils.log import ( + get_logger, + ColorFormatter, + logger as base_logger, + get_task_logger, + task_logger, + in_sighandler, + logger_isa, + ensure_process_aware_logger, +) +from celery.tests.case import ( + AppCase, Mock, SkipTest, + get_handlers, override_stdouts, patch, wrap_logger, restore_logging, +) + + +class test_TaskFormatter(AppCase): + + def test_no_task(self): + class Record(object): + msg = 'hello world' + levelname = 'info' + exc_text = exc_info = None + stack_info = None + + def getMessage(self): + return self.msg + record = Record() + x = TaskFormatter() + x.format(record) + self.assertEqual(record.task_name, '???') + self.assertEqual(record.task_id, '???') + + +class test_logger_isa(AppCase): + + def test_isa(self): + x = 
get_task_logger('Z1george') + self.assertTrue(logger_isa(x, task_logger)) + prev_x, x.parent = x.parent, None + try: + self.assertFalse(logger_isa(x, task_logger)) + finally: + x.parent = prev_x + + y = get_task_logger('Z1elaine') + y.parent = x + self.assertTrue(logger_isa(y, task_logger)) + self.assertTrue(logger_isa(y, x)) + self.assertTrue(logger_isa(y, y)) + + z = get_task_logger('Z1jerry') + z.parent = y + self.assertTrue(logger_isa(z, task_logger)) + self.assertTrue(logger_isa(z, y)) + self.assertTrue(logger_isa(z, x)) + self.assertTrue(logger_isa(z, z)) + + def test_recursive(self): + x = get_task_logger('X1foo') + prev, x.parent = x.parent, x + try: + with self.assertRaises(RuntimeError): + logger_isa(x, task_logger) + finally: + x.parent = prev + + y = get_task_logger('X2foo') + z = get_task_logger('X2foo') + prev_y, y.parent = y.parent, z + try: + prev_z, z.parent = z.parent, y + try: + with self.assertRaises(RuntimeError): + logger_isa(y, task_logger) + finally: + z.parent = prev_z + finally: + y.parent = prev_y + + +class test_ColorFormatter(AppCase): + + @patch('celery.utils.log.safe_str') + @patch('logging.Formatter.formatException') + def test_formatException_not_string(self, fe, safe_str): + x = ColorFormatter() + value = KeyError() + fe.return_value = value + self.assertIs(x.formatException(value), value) + self.assertTrue(fe.called) + self.assertFalse(safe_str.called) + + @patch('logging.Formatter.formatException') + @patch('celery.utils.log.safe_str') + def test_formatException_string(self, safe_str, fe): + x = ColorFormatter() + fe.return_value = 'HELLO' + try: + raise Exception() + except Exception: + self.assertTrue(x.formatException(sys.exc_info())) + if sys.version_info[0] == 2: + self.assertTrue(safe_str.called) + + @patch('logging.Formatter.format') + def test_format_object(self, _format): + x = ColorFormatter() + x.use_color = True + record = Mock() + record.levelname = 'ERROR' + record.msg = object() + self.assertTrue(x.format(record)) + + @patch('celery.utils.log.safe_str') + def test_format_raises(self, safe_str): + x = ColorFormatter() + + def on_safe_str(s): + try: + raise ValueError('foo') + finally: + safe_str.side_effect = None + safe_str.side_effect = on_safe_str + + class Record(object): + levelname = 'ERROR' + msg = 'HELLO' + exc_info = 1 + exc_text = 'error text' + stack_info = None + + def __str__(self): + return on_safe_str('') + + def getMessage(self): + return self.msg + + record = Record() + safe_str.return_value = record + + msg = x.format(record) + self.assertIn('= 3: + raise + else: + break + + def assertRelativedelta(self, due, last_ran): + try: + from dateutil.relativedelta import relativedelta + except ImportError: + return + l1, d1, n1 = due.remaining_delta(last_ran) + l2, d2, n2 = due.remaining_delta(last_ran, ffwd=relativedelta) + if not isinstance(d1, relativedelta): + self.assertEqual(l1, l2) + for field, value in items(d1._fields()): + self.assertEqual(getattr(d1, field), value) + self.assertFalse(d2.years) + self.assertFalse(d2.months) + self.assertFalse(d2.days) + self.assertFalse(d2.leapdays) + self.assertFalse(d2.hours) + self.assertFalse(d2.minutes) + self.assertFalse(d2.seconds) + self.assertFalse(d2.microseconds) + + def test_every_minute_execution_is_due(self): + last_ran = self.now - timedelta(seconds=61) + due, remaining = self.every_minute.is_due(last_ran) + self.assertRelativedelta(self.every_minute, last_ran) + self.assertTrue(due) + self.seconds_almost_equal(remaining, self.next_minute, 1) + + def 
test_every_minute_execution_is_not_due(self): + last_ran = self.now - timedelta(seconds=self.now.second) + due, remaining = self.every_minute.is_due(last_ran) + self.assertFalse(due) + self.seconds_almost_equal(remaining, self.next_minute, 1) + + def test_execution_is_due_on_saturday(self): + # 29th of May 2010 is a saturday + with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 29, 10, 30)): + last_ran = self.now - timedelta(seconds=61) + due, remaining = self.every_minute.is_due(last_ran) + self.assertTrue(due) + self.seconds_almost_equal(remaining, self.next_minute, 1) + + def test_execution_is_due_on_sunday(self): + # 30th of May 2010 is a sunday + with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 30, 10, 30)): + last_ran = self.now - timedelta(seconds=61) + due, remaining = self.every_minute.is_due(last_ran) + self.assertTrue(due) + self.seconds_almost_equal(remaining, self.next_minute, 1) + + def test_execution_is_due_on_monday(self): + # 31st of May 2010 is a monday + with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 31, 10, 30)): + last_ran = self.now - timedelta(seconds=61) + due, remaining = self.every_minute.is_due(last_ran) + self.assertTrue(due) + self.seconds_almost_equal(remaining, self.next_minute, 1) + + def test_every_hour_execution_is_due(self): + with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 10, 10, 30)): + due, remaining = self.hourly.is_due(datetime(2010, 5, 10, 6, 30)) + self.assertTrue(due) + self.assertEqual(remaining, 60 * 60) + + def test_every_hour_execution_is_not_due(self): + with patch_crontab_nowfun(self.hourly, datetime(2010, 5, 10, 10, 29)): + due, remaining = self.hourly.is_due(datetime(2010, 5, 10, 9, 30)) + self.assertFalse(due) + self.assertEqual(remaining, 60) + + def test_first_quarter_execution_is_due(self): + with patch_crontab_nowfun( + self.quarterly, datetime(2010, 5, 10, 10, 15)): + due, remaining = self.quarterly.is_due( + datetime(2010, 5, 10, 6, 30), + ) + self.assertTrue(due) + self.assertEqual(remaining, 15 * 60) + + def test_second_quarter_execution_is_due(self): + with patch_crontab_nowfun( + self.quarterly, datetime(2010, 5, 10, 10, 30)): + due, remaining = self.quarterly.is_due( + datetime(2010, 5, 10, 6, 30), + ) + self.assertTrue(due) + self.assertEqual(remaining, 15 * 60) + + def test_first_quarter_execution_is_not_due(self): + with patch_crontab_nowfun( + self.quarterly, datetime(2010, 5, 10, 10, 14)): + due, remaining = self.quarterly.is_due( + datetime(2010, 5, 10, 10, 0), + ) + self.assertFalse(due) + self.assertEqual(remaining, 60) + + def test_second_quarter_execution_is_not_due(self): + with patch_crontab_nowfun( + self.quarterly, datetime(2010, 5, 10, 10, 29)): + due, remaining = self.quarterly.is_due( + datetime(2010, 5, 10, 10, 15), + ) + self.assertFalse(due) + self.assertEqual(remaining, 60) + + def test_daily_execution_is_due(self): + with patch_crontab_nowfun(self.daily, datetime(2010, 5, 10, 7, 30)): + due, remaining = self.daily.is_due(datetime(2010, 5, 9, 7, 30)) + self.assertTrue(due) + self.assertEqual(remaining, 24 * 60 * 60) + + def test_daily_execution_is_not_due(self): + with patch_crontab_nowfun(self.daily, datetime(2010, 5, 10, 10, 30)): + due, remaining = self.daily.is_due(datetime(2010, 5, 10, 7, 30)) + self.assertFalse(due) + self.assertEqual(remaining, 21 * 60 * 60) + + def test_weekly_execution_is_due(self): + with patch_crontab_nowfun(self.weekly, datetime(2010, 5, 6, 7, 30)): + due, remaining = self.weekly.is_due(datetime(2010, 4, 30, 7, 30)) + self.assertTrue(due) + 
self.assertEqual(remaining, 7 * 24 * 60 * 60) + + def test_weekly_execution_is_not_due(self): + with patch_crontab_nowfun(self.weekly, datetime(2010, 5, 7, 10, 30)): + due, remaining = self.weekly.is_due(datetime(2010, 5, 6, 7, 30)) + self.assertFalse(due) + self.assertEqual(remaining, 6 * 24 * 60 * 60 - 3 * 60 * 60) + + def test_monthly_execution_is_due(self): + with patch_crontab_nowfun(self.monthly, datetime(2010, 5, 13, 7, 30)): + due, remaining = self.monthly.is_due(datetime(2010, 4, 8, 7, 30)) + self.assertTrue(due) + self.assertEqual(remaining, 28 * 24 * 60 * 60) + + def test_monthly_execution_is_not_due(self): + with patch_crontab_nowfun(self.monthly, datetime(2010, 5, 9, 10, 30)): + due, remaining = self.monthly.is_due(datetime(2010, 4, 8, 7, 30)) + self.assertFalse(due) + self.assertEqual(remaining, 4 * 24 * 60 * 60 - 3 * 60 * 60) + + def test_monthly_moy_execution_is_due(self): + with patch_crontab_nowfun( + self.monthly_moy, datetime(2014, 2, 26, 22, 0)): + due, remaining = self.monthly_moy.is_due( + datetime(2013, 7, 4, 10, 0), + ) + self.assertTrue(due) + self.assertEqual(remaining, 60.) + + def test_monthly_moy_execution_is_not_due(self): + raise SkipTest('unstable test') + with patch_crontab_nowfun( + self.monthly_moy, datetime(2013, 6, 28, 14, 30)): + due, remaining = self.monthly_moy.is_due( + datetime(2013, 6, 28, 22, 14), + ) + self.assertFalse(due) + attempt = ( + time.mktime(datetime(2014, 2, 26, 22, 0).timetuple()) - + time.mktime(datetime(2013, 6, 28, 14, 30).timetuple()) - + 60 * 60 + ) + self.assertEqual(remaining, attempt) + + def test_monthly_moy_execution_is_due2(self): + with patch_crontab_nowfun( + self.monthly_moy, datetime(2014, 2, 26, 22, 0)): + due, remaining = self.monthly_moy.is_due( + datetime(2013, 2, 28, 10, 0), + ) + self.assertTrue(due) + self.assertEqual(remaining, 60.) 
+ + def test_monthly_moy_execution_is_not_due2(self): + with patch_crontab_nowfun( + self.monthly_moy, datetime(2014, 2, 26, 21, 0)): + due, remaining = self.monthly_moy.is_due( + datetime(2013, 6, 28, 22, 14), + ) + self.assertFalse(due) + attempt = 60 * 60 + self.assertEqual(remaining, attempt) + + def test_yearly_execution_is_due(self): + with patch_crontab_nowfun(self.yearly, datetime(2010, 3, 11, 7, 30)): + due, remaining = self.yearly.is_due(datetime(2009, 3, 12, 7, 30)) + self.assertTrue(due) + self.assertEqual(remaining, 364 * 24 * 60 * 60) + + def test_yearly_execution_is_not_due(self): + with patch_crontab_nowfun(self.yearly, datetime(2010, 3, 7, 10, 30)): + due, remaining = self.yearly.is_due(datetime(2009, 3, 12, 7, 30)) + self.assertFalse(due) + self.assertEqual(remaining, 4 * 24 * 60 * 60 - 3 * 60 * 60) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_utils.py b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_utils.py new file mode 100644 index 0000000..b0ff108 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/app/test_utils.py @@ -0,0 +1,46 @@ +from __future__ import absolute_import + +from collections import Mapping, MutableMapping + +from celery.app.utils import Settings, filter_hidden_settings, bugreport + +from celery.tests.case import AppCase, Mock + + +class TestSettings(AppCase): + """ + Tests of celery.app.utils.Settings + """ + def test_is_mapping(self): + """Settings should be a collections.Mapping""" + self.assertTrue(issubclass(Settings, Mapping)) + + def test_is_mutable_mapping(self): + """Settings should be a collections.MutableMapping""" + self.assertTrue(issubclass(Settings, MutableMapping)) + + +class test_filter_hidden_settings(AppCase): + + def test_handles_non_string_keys(self): + """filter_hidden_settings shouldn't raise an exception when handling + mappings with non-string keys""" + conf = { + 'STRING_KEY': 'VALUE1', + ('NON', 'STRING', 'KEY'): 'VALUE2', + 'STRING_KEY2': { + 'STRING_KEY3': 1, + ('NON', 'STRING', 'KEY', '2'): 2 + }, + } + filter_hidden_settings(conf) + + +class test_bugreport(AppCase): + + def test_no_conn_driver_info(self): + self.app.connection = Mock() + conn = self.app.connection.return_value = Mock() + conn.transport = None + + bugreport(self.app) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_amqp.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_amqp.py new file mode 100644 index 0000000..282f8b1 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_amqp.py @@ -0,0 +1,406 @@ +from __future__ import absolute_import + +import json +import pickle +import socket + +from contextlib import contextmanager +from datetime import timedelta +from pickle import dumps, loads + +from billiard.einfo import ExceptionInfo + +from celery import states +from celery.backends.amqp import AMQPBackend +from celery.exceptions import TimeoutError +from celery.five import Empty, Queue, range +from celery.utils import uuid + +from celery.tests.case import ( + AppCase, Mock, depends_on_current_app, patch, sleepdeprived, +) + + +class SomeClass(object): + + def __init__(self, data): + self.data = data + + +class test_AMQPBackend(AppCase): + + def create_backend(self, **opts): + opts = dict(dict(serializer='pickle', 
persistent=True), **opts) + return AMQPBackend(self.app, **opts) + + def test_mark_as_done(self): + tb1 = self.create_backend(max_cached_results=1) + tb2 = self.create_backend(max_cached_results=1) + + tid = uuid() + + tb1.mark_as_done(tid, 42) + self.assertEqual(tb2.get_status(tid), states.SUCCESS) + self.assertEqual(tb2.get_result(tid), 42) + self.assertTrue(tb2._cache.get(tid)) + self.assertTrue(tb2.get_result(tid), 42) + + @depends_on_current_app + def test_pickleable(self): + self.assertTrue(loads(dumps(self.create_backend()))) + + def test_revive(self): + tb = self.create_backend() + tb.revive(None) + + def test_is_pickled(self): + tb1 = self.create_backend() + tb2 = self.create_backend() + + tid2 = uuid() + result = {'foo': 'baz', 'bar': SomeClass(12345)} + tb1.mark_as_done(tid2, result) + # is serialized properly. + rindb = tb2.get_result(tid2) + self.assertEqual(rindb.get('foo'), 'baz') + self.assertEqual(rindb.get('bar').data, 12345) + + def test_mark_as_failure(self): + tb1 = self.create_backend() + tb2 = self.create_backend() + + tid3 = uuid() + try: + raise KeyError('foo') + except KeyError as exception: + einfo = ExceptionInfo() + tb1.mark_as_failure(tid3, exception, traceback=einfo.traceback) + self.assertEqual(tb2.get_status(tid3), states.FAILURE) + self.assertIsInstance(tb2.get_result(tid3), KeyError) + self.assertEqual(tb2.get_traceback(tid3), einfo.traceback) + + def test_repair_uuid(self): + from celery.backends.amqp import repair_uuid + for i in range(10): + tid = uuid() + self.assertEqual(repair_uuid(tid.replace('-', '')), tid) + + def test_expires_is_int(self): + b = self.create_backend(expires=48) + self.assertEqual(b.queue_arguments.get('x-expires'), 48 * 1000.0) + + def test_expires_is_float(self): + b = self.create_backend(expires=48.3) + self.assertEqual(b.queue_arguments.get('x-expires'), 48.3 * 1000.0) + + def test_expires_is_timedelta(self): + b = self.create_backend(expires=timedelta(minutes=1)) + self.assertEqual(b.queue_arguments.get('x-expires'), 60 * 1000.0) + + @sleepdeprived() + def test_store_result_retries(self): + iterations = [0] + stop_raising_at = [5] + + def publish(*args, **kwargs): + if iterations[0] > stop_raising_at[0]: + return + iterations[0] += 1 + raise KeyError('foo') + + backend = AMQPBackend(self.app) + from celery.app.amqp import TaskProducer + prod, TaskProducer.publish = TaskProducer.publish, publish + try: + with self.assertRaises(KeyError): + backend.retry_policy['max_retries'] = None + backend.store_result('foo', 'bar', 'STARTED') + + with self.assertRaises(KeyError): + backend.retry_policy['max_retries'] = 10 + backend.store_result('foo', 'bar', 'STARTED') + finally: + TaskProducer.publish = prod + + def assertState(self, retval, state): + self.assertEqual(retval['status'], state) + + def test_poll_no_messages(self): + b = self.create_backend() + self.assertState(b.get_task_meta(uuid()), states.PENDING) + + @contextmanager + def _result_context(self, serializer='pickle'): + results = Queue() + + class Message(object): + acked = 0 + requeued = 0 + + def __init__(self, **merge): + self.payload = dict({'status': states.STARTED, + 'result': None}, **merge) + if serializer == 'json': + self.body = json.dumps(self.payload) + self.content_type = 'application/json' + else: + self.body = pickle.dumps(self.payload) + self.content_type = 'application/x-python-serialize' + self.content_encoding = 'binary' + + def ack(self, *args, **kwargs): + self.acked += 1 + + def requeue(self, *args, **kwargs): + self.requeued += 1 + + class 
MockBinding(object): + + def __init__(self, *args, **kwargs): + self.channel = Mock() + + def __call__(self, *args, **kwargs): + return self + + def declare(self): + pass + + def get(self, no_ack=False, accept=None): + try: + m = results.get(block=False) + if m: + m.accept = accept + return m + except Empty: + pass + + def is_bound(self): + return True + + class MockBackend(AMQPBackend): + Queue = MockBinding + + backend = MockBackend(self.app, max_cached_results=100) + backend.serializer = serializer + backend._republish = Mock() + + yield results, backend, Message + + def test_backlog_limit_exceeded(self): + with self._result_context() as (results, backend, Message): + for i in range(1001): + results.put(Message(task_id='id', status=states.RECEIVED)) + with self.assertRaises(backend.BacklogLimitExceeded): + backend.get_task_meta('id') + + def test_poll_result(self): + with self._result_context() as (results, backend, Message): + tid = uuid() + # FFWD's to the latest state. + state_messages = [ + Message(task_id=tid, status=states.RECEIVED, seq=1), + Message(task_id=tid, status=states.STARTED, seq=2), + Message(task_id=tid, status=states.FAILURE, seq=3), + ] + for state_message in state_messages: + results.put(state_message) + r1 = backend.get_task_meta(tid) + self.assertDictContainsSubset( + { + 'status': states.FAILURE, + 'seq': 3 + }, r1, 'FFWDs to the last state', + ) + + # Caches last known state. + tid = uuid() + results.put(Message(task_id=tid)) + backend.get_task_meta(tid) + self.assertIn(tid, backend._cache, 'Caches last known state') + + self.assertTrue(state_messages[-1].requeued) + + # Returns cache if no new states. + results.queue.clear() + assert not results.qsize() + backend._cache[tid] = 'hello' + self.assertEqual( + backend.get_task_meta(tid), 'hello', + 'Returns cache if no new states', + ) + + def test_poll_result_for_json_serializer(self): + with self._result_context(serializer='json') as ( + results, backend, Message): + tid = uuid() + # FFWD's to the latest state. + state_messages = [ + Message(task_id=tid, status=states.RECEIVED, seq=1), + Message(task_id=tid, status=states.STARTED, seq=2), + Message(task_id=tid, status=states.FAILURE, seq=3, + result={ + 'exc_type': 'RuntimeError', + 'exc_message': 'Mock' + }), + ] + for state_message in state_messages: + results.put(state_message) + r1 = backend.get_task_meta(tid) + self.assertDictContainsSubset({ + 'status': states.FAILURE, + 'seq': 3 + }, r1, 'FFWDs to the last state') + self.assertEquals(type(r1['result']).__name__, 'RuntimeError') + self.assertEqual(str(r1['result']), 'Mock') + + # Caches last known state. + tid = uuid() + results.put(Message(task_id=tid)) + backend.get_task_meta(tid) + self.assertIn(tid, backend._cache, 'Caches last known state') + + self.assertTrue(state_messages[-1].requeued) + + # Returns cache if no new states. 
+ results.queue.clear() + assert not results.qsize() + backend._cache[tid] = 'hello' + self.assertEqual( + backend.get_task_meta(tid), 'hello', + 'Returns cache if no new states', + ) + + def test_wait_for(self): + b = self.create_backend() + + tid = uuid() + with self.assertRaises(TimeoutError): + b.wait_for(tid, timeout=0.1) + b.store_result(tid, None, states.STARTED) + with self.assertRaises(TimeoutError): + b.wait_for(tid, timeout=0.1) + b.store_result(tid, None, states.RETRY) + with self.assertRaises(TimeoutError): + b.wait_for(tid, timeout=0.1) + b.store_result(tid, 42, states.SUCCESS) + self.assertEqual(b.wait_for(tid, timeout=1)['result'], 42) + b.store_result(tid, 56, states.SUCCESS) + self.assertEqual(b.wait_for(tid, timeout=1)['result'], 42, + 'result is cached') + self.assertEqual(b.wait_for(tid, timeout=1, cache=False)['result'], 56) + b.store_result(tid, KeyError('foo'), states.FAILURE) + res = b.wait_for(tid, timeout=1, cache=False) + self.assertEqual(res['status'], states.FAILURE) + b.store_result(tid, KeyError('foo'), states.PENDING) + with self.assertRaises(TimeoutError): + b.wait_for(tid, timeout=0.01, cache=False) + + def test_drain_events_remaining_timeouts(self): + + class Connection(object): + + def drain_events(self, timeout=None): + pass + + b = self.create_backend() + with self.app.pool.acquire_channel(block=False) as (_, channel): + binding = b._create_binding(uuid()) + consumer = b.Consumer(channel, binding, no_ack=True) + with self.assertRaises(socket.timeout): + b.drain_events(Connection(), consumer, timeout=0.1) + + def test_get_many(self): + b = self.create_backend(max_cached_results=10) + + tids = [] + for i in range(10): + tid = uuid() + b.store_result(tid, i, states.SUCCESS) + tids.append(tid) + + res = list(b.get_many(tids, timeout=1)) + expected_results = [ + (task_id, { + 'status': states.SUCCESS, + 'result': i, + 'traceback': None, + 'task_id': task_id, + 'children': None, + }) + for i, task_id in enumerate(tids) + ] + self.assertEqual(sorted(res), sorted(expected_results)) + self.assertDictEqual(b._cache[res[0][0]], res[0][1]) + cached_res = list(b.get_many(tids, timeout=1)) + self.assertEqual(sorted(cached_res), sorted(expected_results)) + + # times out when not ready in cache (this shouldn't happen) + b._cache[res[0][0]]['status'] = states.RETRY + with self.assertRaises(socket.timeout): + list(b.get_many(tids, timeout=0.01)) + + # times out when result not yet ready + with self.assertRaises(socket.timeout): + tids = [uuid()] + b.store_result(tids[0], i, states.PENDING) + list(b.get_many(tids, timeout=0.01)) + + def test_get_many_raises_outer_block(self): + + class Backend(AMQPBackend): + + def Consumer(*args, **kwargs): + raise KeyError('foo') + + b = Backend(self.app) + with self.assertRaises(KeyError): + next(b.get_many(['id1'])) + + def test_get_many_raises_inner_block(self): + with patch('kombu.connection.Connection.drain_events') as drain: + drain.side_effect = KeyError('foo') + b = AMQPBackend(self.app) + with self.assertRaises(KeyError): + next(b.get_many(['id1'])) + + def test_consume_raises_inner_block(self): + with patch('kombu.connection.Connection.drain_events') as drain: + + def se(*args, **kwargs): + drain.side_effect = ValueError() + raise KeyError('foo') + drain.side_effect = se + b = AMQPBackend(self.app) + with self.assertRaises(ValueError): + next(b.consume('id1')) + + def test_no_expires(self): + b = self.create_backend(expires=None) + app = self.app + app.conf.CELERY_TASK_RESULT_EXPIRES = None + b = 
self.create_backend(expires=None) + with self.assertRaises(KeyError): + b.queue_arguments['x-expires'] + + def test_process_cleanup(self): + self.create_backend().process_cleanup() + + def test_reload_task_result(self): + with self.assertRaises(NotImplementedError): + self.create_backend().reload_task_result('x') + + def test_reload_group_result(self): + with self.assertRaises(NotImplementedError): + self.create_backend().reload_group_result('x') + + def test_save_group(self): + with self.assertRaises(NotImplementedError): + self.create_backend().save_group('x', 'x') + + def test_restore_group(self): + with self.assertRaises(NotImplementedError): + self.create_backend().restore_group('x') + + def test_delete_group(self): + with self.assertRaises(NotImplementedError): + self.create_backend().delete_group('x') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_backends.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_backends.py new file mode 100644 index 0000000..d301e55 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_backends.py @@ -0,0 +1,41 @@ +from __future__ import absolute_import + +from celery import backends +from celery.exceptions import ImproperlyConfigured +from celery.backends.amqp import AMQPBackend +from celery.backends.cache import CacheBackend +from celery.tests.case import AppCase, depends_on_current_app, patch + + +class test_backends(AppCase): + + def test_get_backend_aliases(self): + expects = [('amqp://', AMQPBackend), + ('cache+memory://', CacheBackend)] + + for url, expect_cls in expects: + backend, url = backends.get_backend_by_url(url, self.app.loader) + self.assertIsInstance( + backend(app=self.app, url=url), + expect_cls, + ) + + def test_unknown_backend(self): + with self.assertRaises(ImportError): + backends.get_backend_cls('fasodaopjeqijwqe', self.app.loader) + + @depends_on_current_app + def test_default_backend(self): + self.assertEqual(backends.default_backend, self.app.backend) + + def test_backend_by_url(self, url='redis://localhost/1'): + from celery.backends.redis import RedisBackend + backend, url_ = backends.get_backend_by_url(url, self.app.loader) + self.assertIs(backend, RedisBackend) + self.assertEqual(url_, url) + + def test_sym_raises_ValuError(self): + with patch('celery.backends.symbol_by_name') as sbn: + sbn.side_effect = ValueError() + with self.assertRaises(ImproperlyConfigured): + backends.get_backend_cls('xxx.xxx:foo', self.app.loader) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_base.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_base.py new file mode 100644 index 0000000..f54dc07 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_base.py @@ -0,0 +1,466 @@ +from __future__ import absolute_import + +import sys +import types + +from contextlib import contextmanager + +from celery.exceptions import ChordError +from celery.five import items, range +from celery.utils import serialization +from celery.utils.serialization import subclass_exception +from celery.utils.serialization import find_pickleable_exception as fnpe +from celery.utils.serialization import UnpickleableExceptionWrapper +from celery.utils.serialization import get_pickleable_exception as gpe + +from celery import states +from celery import group +from celery.backends.base import ( + BaseBackend, + KeyValueStoreBackend, + DisabledBackend, +) +from celery.result import result_from_tuple +from celery.utils 
import uuid + +from celery.tests.case import AppCase, Mock, SkipTest, patch + + +class wrapobject(object): + + def __init__(self, *args, **kwargs): + self.args = args + +if sys.version_info[0] == 3 or getattr(sys, 'pypy_version_info', None): + Oldstyle = None +else: + Oldstyle = types.ClassType('Oldstyle', (), {}) +Unpickleable = subclass_exception('Unpickleable', KeyError, 'foo.module') +Impossible = subclass_exception('Impossible', object, 'foo.module') +Lookalike = subclass_exception('Lookalike', wrapobject, 'foo.module') + + +class test_serialization(AppCase): + + def test_create_exception_cls(self): + self.assertTrue(serialization.create_exception_cls('FooError', 'm')) + self.assertTrue(serialization.create_exception_cls('FooError', 'm', + KeyError)) + + +class test_BaseBackend_interface(AppCase): + + def setup(self): + self.b = BaseBackend(self.app) + + def test__forget(self): + with self.assertRaises(NotImplementedError): + self.b._forget('SOMExx-N0Nex1stant-IDxx-') + + def test_forget(self): + with self.assertRaises(NotImplementedError): + self.b.forget('SOMExx-N0nex1stant-IDxx-') + + def test_on_chord_part_return(self): + self.b.on_chord_part_return(None, None, None) + + def test_apply_chord(self, unlock='celery.chord_unlock'): + self.app.tasks[unlock] = Mock() + self.b.apply_chord( + group(app=self.app), (), 'dakj221', None, + result=[self.app.AsyncResult(x) for x in [1, 2, 3]], + ) + self.assertTrue(self.app.tasks[unlock].apply_async.call_count) + + +class test_exception_pickle(AppCase): + + def test_oldstyle(self): + if Oldstyle is None: + raise SkipTest('py3k does not support old style classes') + self.assertTrue(fnpe(Oldstyle())) + + def test_BaseException(self): + self.assertIsNone(fnpe(Exception())) + + def test_get_pickleable_exception(self): + exc = Exception('foo') + self.assertEqual(gpe(exc), exc) + + def test_unpickleable(self): + self.assertIsInstance(fnpe(Unpickleable()), KeyError) + self.assertIsNone(fnpe(Impossible())) + + +class test_prepare_exception(AppCase): + + def setup(self): + self.b = BaseBackend(self.app) + + def test_unpickleable(self): + x = self.b.prepare_exception(Unpickleable(1, 2, 'foo')) + self.assertIsInstance(x, KeyError) + y = self.b.exception_to_python(x) + self.assertIsInstance(y, KeyError) + + def test_impossible(self): + x = self.b.prepare_exception(Impossible()) + self.assertIsInstance(x, UnpickleableExceptionWrapper) + self.assertTrue(str(x)) + y = self.b.exception_to_python(x) + self.assertEqual(y.__class__.__name__, 'Impossible') + if sys.version_info < (2, 5): + self.assertTrue(y.__class__.__module__) + else: + self.assertEqual(y.__class__.__module__, 'foo.module') + + def test_regular(self): + x = self.b.prepare_exception(KeyError('baz')) + self.assertIsInstance(x, KeyError) + y = self.b.exception_to_python(x) + self.assertIsInstance(y, KeyError) + + +class KVBackend(KeyValueStoreBackend): + mget_returns_dict = False + + def __init__(self, app, *args, **kwargs): + self.db = {} + super(KVBackend, self).__init__(app) + + def get(self, key): + return self.db.get(key) + + def set(self, key, value): + self.db[key] = value + + def mget(self, keys): + if self.mget_returns_dict: + return dict((key, self.get(key)) for key in keys) + else: + return [self.get(k) for k in keys] + + def delete(self, key): + self.db.pop(key, None) + + +class DictBackend(BaseBackend): + + def __init__(self, *args, **kwargs): + BaseBackend.__init__(self, *args, **kwargs) + self._data = {'can-delete': {'result': 'foo'}} + + def _restore_group(self, group_id): + if 
group_id == 'exists': + return {'result': 'group'} + + def _get_task_meta_for(self, task_id): + if task_id == 'task-exists': + return {'result': 'task'} + + def _delete_group(self, group_id): + self._data.pop(group_id, None) + + +class test_BaseBackend_dict(AppCase): + + def setup(self): + self.b = DictBackend(app=self.app) + + def test_delete_group(self): + self.b.delete_group('can-delete') + self.assertNotIn('can-delete', self.b._data) + + def test_prepare_exception_json(self): + x = DictBackend(self.app, serializer='json') + e = x.prepare_exception(KeyError('foo')) + self.assertIn('exc_type', e) + e = x.exception_to_python(e) + self.assertEqual(e.__class__.__name__, 'KeyError') + self.assertEqual(str(e), "'foo'") + + def test_save_group(self): + b = BaseBackend(self.app) + b._save_group = Mock() + b.save_group('foofoo', 'xxx') + b._save_group.assert_called_with('foofoo', 'xxx') + + def test_forget_interface(self): + b = BaseBackend(self.app) + with self.assertRaises(NotImplementedError): + b.forget('foo') + + def test_restore_group(self): + self.assertIsNone(self.b.restore_group('missing')) + self.assertIsNone(self.b.restore_group('missing')) + self.assertEqual(self.b.restore_group('exists'), 'group') + self.assertEqual(self.b.restore_group('exists'), 'group') + self.assertEqual(self.b.restore_group('exists', cache=False), 'group') + + def test_reload_group_result(self): + self.b._cache = {} + self.b.reload_group_result('exists') + self.b._cache['exists'] = {'result': 'group'} + + def test_reload_task_result(self): + self.b._cache = {} + self.b.reload_task_result('task-exists') + self.b._cache['task-exists'] = {'result': 'task'} + + def test_fail_from_current_stack(self): + self.b.mark_as_failure = Mock() + try: + raise KeyError('foo') + except KeyError as exc: + self.b.fail_from_current_stack('task_id') + self.assertTrue(self.b.mark_as_failure.called) + args = self.b.mark_as_failure.call_args[0] + self.assertEqual(args[0], 'task_id') + self.assertIs(args[1], exc) + self.assertTrue(args[2]) + + def test_prepare_value_serializes_group_result(self): + self.b.serializer = 'json' + g = self.app.GroupResult('group_id', [self.app.AsyncResult('foo')]) + v = self.b.prepare_value(g) + self.assertIsInstance(v, (list, tuple)) + self.assertEqual(result_from_tuple(v, app=self.app), g) + + v2 = self.b.prepare_value(g[0]) + self.assertIsInstance(v2, (list, tuple)) + self.assertEqual(result_from_tuple(v2, app=self.app), g[0]) + + self.b.serializer = 'pickle' + self.assertIsInstance(self.b.prepare_value(g), self.app.GroupResult) + + def test_is_cached(self): + b = BaseBackend(app=self.app, max_cached_results=1) + b._cache['foo'] = 1 + self.assertTrue(b.is_cached('foo')) + self.assertFalse(b.is_cached('false')) + + +class test_KeyValueStoreBackend(AppCase): + + def setup(self): + self.b = KVBackend(app=self.app) + + def test_on_chord_part_return(self): + assert not self.b.implements_incr + self.b.on_chord_part_return(None, None, None) + + def test_get_store_delete_result(self): + tid = uuid() + self.b.mark_as_done(tid, 'Hello world') + self.assertEqual(self.b.get_result(tid), 'Hello world') + self.assertEqual(self.b.get_status(tid), states.SUCCESS) + self.b.forget(tid) + self.assertEqual(self.b.get_status(tid), states.PENDING) + + def test_strip_prefix(self): + x = self.b.get_key_for_task('x1b34') + self.assertEqual(self.b._strip_prefix(x), 'x1b34') + self.assertEqual(self.b._strip_prefix('x1b34'), 'x1b34') + + def test_get_many(self): + for is_dict in True, False: + self.b.mget_returns_dict = is_dict + 
ids = dict((uuid(), i) for i in range(10)) + for id, i in items(ids): + self.b.mark_as_done(id, i) + it = self.b.get_many(list(ids)) + for i, (got_id, got_state) in enumerate(it): + self.assertEqual(got_state['result'], ids[got_id]) + self.assertEqual(i, 9) + self.assertTrue(list(self.b.get_many(list(ids)))) + + def test_get_many_times_out(self): + tasks = [uuid() for _ in range(4)] + self.b._cache[tasks[1]] = {'status': 'PENDING'} + with self.assertRaises(self.b.TimeoutError): + list(self.b.get_many(tasks, timeout=0.01, interval=0.01)) + + def test_chord_part_return_no_gid(self): + self.b.implements_incr = True + task = Mock() + state = 'SUCCESS' + result = 10 + task.request.group = None + self.b.get_key_for_chord = Mock() + self.b.get_key_for_chord.side_effect = AssertionError( + 'should not get here', + ) + self.assertIsNone(self.b.on_chord_part_return(task, state, result)) + + @contextmanager + def _chord_part_context(self, b): + + @self.app.task(shared=False) + def callback(result): + pass + + b.implements_incr = True + b.client = Mock() + with patch('celery.backends.base.GroupResult') as GR: + deps = GR.restore.return_value = Mock(name='DEPS') + deps.__len__ = Mock() + deps.__len__.return_value = 10 + b.incr = Mock() + b.incr.return_value = 10 + b.expire = Mock() + task = Mock() + task.request.group = 'grid' + cb = task.request.chord = callback.s() + task.request.chord.freeze() + callback.backend = b + callback.backend.fail_from_current_stack = Mock() + yield task, deps, cb + + def test_chord_part_return_propagate_set(self): + with self._chord_part_context(self.b) as (task, deps, _): + self.b.on_chord_part_return(task, 'SUCCESS', 10, propagate=True) + self.assertFalse(self.b.expire.called) + deps.delete.assert_called_with() + deps.join_native.assert_called_with(propagate=True, timeout=3.0) + + def test_chord_part_return_propagate_default(self): + with self._chord_part_context(self.b) as (task, deps, _): + self.b.on_chord_part_return(task, 'SUCCESS', 10, propagate=None) + self.assertFalse(self.b.expire.called) + deps.delete.assert_called_with() + deps.join_native.assert_called_with( + propagate=self.b.app.conf.CELERY_CHORD_PROPAGATES, + timeout=3.0, + ) + + def test_chord_part_return_join_raises_internal(self): + with self._chord_part_context(self.b) as (task, deps, callback): + deps._failed_join_report = lambda: iter([]) + deps.join_native.side_effect = KeyError('foo') + self.b.on_chord_part_return(task, 'SUCCESS', 10) + self.assertTrue(self.b.fail_from_current_stack.called) + args = self.b.fail_from_current_stack.call_args + exc = args[1]['exc'] + self.assertIsInstance(exc, ChordError) + self.assertIn('foo', str(exc)) + + def test_chord_part_return_join_raises_task(self): + b = KVBackend(serializer='pickle', app=self.app) + with self._chord_part_context(b) as (task, deps, callback): + deps._failed_join_report = lambda: iter([ + self.app.AsyncResult('culprit'), + ]) + deps.join_native.side_effect = KeyError('foo') + b.on_chord_part_return(task, 'SUCCESS', 10) + self.assertTrue(b.fail_from_current_stack.called) + args = b.fail_from_current_stack.call_args + exc = args[1]['exc'] + self.assertIsInstance(exc, ChordError) + self.assertIn('Dependency culprit raised', str(exc)) + + def test_restore_group_from_json(self): + b = KVBackend(serializer='json', app=self.app) + g = self.app.GroupResult( + 'group_id', + [self.app.AsyncResult('a'), self.app.AsyncResult('b')], + ) + b._save_group(g.id, g) + g2 = b._restore_group(g.id)['result'] + self.assertEqual(g2, g) + + def 
test_restore_group_from_pickle(self): + b = KVBackend(serializer='pickle', app=self.app) + g = self.app.GroupResult( + 'group_id', + [self.app.AsyncResult('a'), self.app.AsyncResult('b')], + ) + b._save_group(g.id, g) + g2 = b._restore_group(g.id)['result'] + self.assertEqual(g2, g) + + def test_chord_apply_fallback(self): + self.b.implements_incr = False + self.b.fallback_chord_unlock = Mock() + self.b.apply_chord( + group(app=self.app), (), 'group_id', 'body', + result='result', foo=1, + ) + self.b.fallback_chord_unlock.assert_called_with( + 'group_id', 'body', result='result', foo=1, + ) + + def test_get_missing_meta(self): + self.assertIsNone(self.b.get_result('xxx-missing')) + self.assertEqual(self.b.get_status('xxx-missing'), states.PENDING) + + def test_save_restore_delete_group(self): + tid = uuid() + tsr = self.app.GroupResult( + tid, [self.app.AsyncResult(uuid()) for _ in range(10)], + ) + self.b.save_group(tid, tsr) + self.b.restore_group(tid) + self.assertEqual(self.b.restore_group(tid), tsr) + self.b.delete_group(tid) + self.assertIsNone(self.b.restore_group(tid)) + + def test_restore_missing_group(self): + self.assertIsNone(self.b.restore_group('xxx-nonexistant')) + + +class test_KeyValueStoreBackend_interface(AppCase): + + def test_get(self): + with self.assertRaises(NotImplementedError): + KeyValueStoreBackend(self.app).get('a') + + def test_set(self): + with self.assertRaises(NotImplementedError): + KeyValueStoreBackend(self.app).set('a', 1) + + def test_incr(self): + with self.assertRaises(NotImplementedError): + KeyValueStoreBackend(self.app).incr('a') + + def test_cleanup(self): + self.assertFalse(KeyValueStoreBackend(self.app).cleanup()) + + def test_delete(self): + with self.assertRaises(NotImplementedError): + KeyValueStoreBackend(self.app).delete('a') + + def test_mget(self): + with self.assertRaises(NotImplementedError): + KeyValueStoreBackend(self.app).mget(['a']) + + def test_forget(self): + with self.assertRaises(NotImplementedError): + KeyValueStoreBackend(self.app).forget('a') + + +class test_DisabledBackend(AppCase): + + def test_store_result(self): + DisabledBackend(self.app).store_result() + + def test_is_disabled(self): + with self.assertRaises(NotImplementedError): + DisabledBackend(self.app).get_status('foo') + + def test_as_uri(self): + self.assertEqual(DisabledBackend(self.app).as_uri(), 'disabled://') + + +class test_as_uri(AppCase): + + def setup(self): + self.b = BaseBackend( + app=self.app, + url='sch://uuuu:pwpw@hostname.dom' + ) + + def test_as_uri_include_password(self): + self.assertEqual(self.b.as_uri(True), self.b.url) + + def test_as_uri_exclude_password(self): + self.assertEqual(self.b.as_uri(), 'sch://uuuu:**@hostname.dom/') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_cache.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_cache.py new file mode 100644 index 0000000..fcd8dd5 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_cache.py @@ -0,0 +1,280 @@ +from __future__ import absolute_import + +import sys +import types + +from contextlib import contextmanager + +from kombu.utils.encoding import str_to_bytes, ensure_bytes + +from celery import signature +from celery import states +from celery import group +from celery.backends.cache import CacheBackend, DummyClient, backends +from celery.exceptions import ImproperlyConfigured +from celery.five import items, string, text_t +from celery.utils import uuid + +from celery.tests.case import ( + AppCase, Mock, 
disable_stdouts, mask_modules, patch, reset_modules, +) + +PY3 = sys.version_info[0] == 3 + + +class SomeClass(object): + + def __init__(self, data): + self.data = data + + +class test_CacheBackend(AppCase): + + def setup(self): + self.tb = CacheBackend(backend='memory://', app=self.app) + self.tid = uuid() + self.old_get_best_memcached = backends['memcache'] + backends['memcache'] = lambda: (DummyClient, ensure_bytes) + + def teardown(self): + backends['memcache'] = self.old_get_best_memcached + + def test_no_backend(self): + self.app.conf.CELERY_CACHE_BACKEND = None + with self.assertRaises(ImproperlyConfigured): + CacheBackend(backend=None, app=self.app) + + def test_mark_as_done(self): + self.assertEqual(self.tb.get_status(self.tid), states.PENDING) + self.assertIsNone(self.tb.get_result(self.tid)) + + self.tb.mark_as_done(self.tid, 42) + self.assertEqual(self.tb.get_status(self.tid), states.SUCCESS) + self.assertEqual(self.tb.get_result(self.tid), 42) + + def test_is_pickled(self): + result = {'foo': 'baz', 'bar': SomeClass(12345)} + self.tb.mark_as_done(self.tid, result) + # is serialized properly. + rindb = self.tb.get_result(self.tid) + self.assertEqual(rindb.get('foo'), 'baz') + self.assertEqual(rindb.get('bar').data, 12345) + + def test_mark_as_failure(self): + try: + raise KeyError('foo') + except KeyError as exception: + self.tb.mark_as_failure(self.tid, exception) + self.assertEqual(self.tb.get_status(self.tid), states.FAILURE) + self.assertIsInstance(self.tb.get_result(self.tid), KeyError) + + def test_apply_chord(self): + tb = CacheBackend(backend='memory://', app=self.app) + gid, res = uuid(), [self.app.AsyncResult(uuid()) for _ in range(3)] + tb.apply_chord(group(app=self.app), (), gid, {}, result=res) + + @patch('celery.result.GroupResult.restore') + def test_on_chord_part_return(self, restore): + tb = CacheBackend(backend='memory://', app=self.app) + + deps = Mock() + deps.__len__ = Mock() + deps.__len__.return_value = 2 + restore.return_value = deps + task = Mock() + task.name = 'foobarbaz' + self.app.tasks['foobarbaz'] = task + task.request.chord = signature(task) + + gid, res = uuid(), [self.app.AsyncResult(uuid()) for _ in range(3)] + task.request.group = gid + tb.apply_chord(group(app=self.app), (), gid, {}, result=res) + + self.assertFalse(deps.join_native.called) + tb.on_chord_part_return(task, 'SUCCESS', 10) + self.assertFalse(deps.join_native.called) + + tb.on_chord_part_return(task, 'SUCCESS', 10) + deps.join_native.assert_called_with(propagate=True, timeout=3.0) + deps.delete.assert_called_with() + + def test_mget(self): + self.tb.set('foo', 1) + self.tb.set('bar', 2) + + self.assertDictEqual(self.tb.mget(['foo', 'bar']), + {'foo': 1, 'bar': 2}) + + def test_forget(self): + self.tb.mark_as_done(self.tid, {'foo': 'bar'}) + x = self.app.AsyncResult(self.tid, backend=self.tb) + x.forget() + self.assertIsNone(x.result) + + def test_process_cleanup(self): + self.tb.process_cleanup() + + def test_expires_as_int(self): + tb = CacheBackend(backend='memory://', expires=10, app=self.app) + self.assertEqual(tb.expires, 10) + + def test_unknown_backend_raises_ImproperlyConfigured(self): + with self.assertRaises(ImproperlyConfigured): + CacheBackend(backend='unknown://', app=self.app) + + def test_as_uri_no_servers(self): + self.assertEqual(self.tb.as_uri(), 'memory:///') + + def test_as_uri_one_server(self): + backend = 'memcache://127.0.0.1:11211/' + b = CacheBackend(backend=backend, app=self.app) + self.assertEqual(b.as_uri(), backend) + + def 
test_as_uri_multiple_servers(self): + backend = 'memcache://127.0.0.1:11211;127.0.0.2:11211;127.0.0.3/' + b = CacheBackend(backend=backend, app=self.app) + self.assertEqual(b.as_uri(), backend) + + @disable_stdouts + def test_regression_worker_startup_info(self): + self.app.conf.result_backend = ( + 'cache+memcached://127.0.0.1:11211;127.0.0.2:11211;127.0.0.3/' + ) + worker = self.app.Worker() + worker.on_start() + self.assertTrue(worker.startup_info()) + + +class MyMemcachedStringEncodingError(Exception): + pass + + +class MemcachedClient(DummyClient): + + def set(self, key, value, *args, **kwargs): + if PY3: + key_t, must_be, not_be, cod = bytes, 'string', 'bytes', 'decode' + else: + key_t, must_be, not_be, cod = text_t, 'bytes', 'string', 'encode' + if isinstance(key, key_t): + raise MyMemcachedStringEncodingError( + 'Keys must be {0}, not {1}. Convert your ' + 'strings using mystring.{2}(charset)!'.format( + must_be, not_be, cod)) + return super(MemcachedClient, self).set(key, value, *args, **kwargs) + + +class MockCacheMixin(object): + + @contextmanager + def mock_memcache(self): + memcache = types.ModuleType('memcache') + memcache.Client = MemcachedClient + memcache.Client.__module__ = memcache.__name__ + prev, sys.modules['memcache'] = sys.modules.get('memcache'), memcache + try: + yield True + finally: + if prev is not None: + sys.modules['memcache'] = prev + + @contextmanager + def mock_pylibmc(self): + pylibmc = types.ModuleType('pylibmc') + pylibmc.Client = MemcachedClient + pylibmc.Client.__module__ = pylibmc.__name__ + prev = sys.modules.get('pylibmc') + sys.modules['pylibmc'] = pylibmc + try: + yield True + finally: + if prev is not None: + sys.modules['pylibmc'] = prev + + +class test_get_best_memcache(AppCase, MockCacheMixin): + + def test_pylibmc(self): + with self.mock_pylibmc(): + with reset_modules('celery.backends.cache'): + from celery.backends import cache + cache._imp = [None] + self.assertEqual(cache.get_best_memcache()[0].__module__, + 'pylibmc') + + def test_memcache(self): + with self.mock_memcache(): + with reset_modules('celery.backends.cache'): + with mask_modules('pylibmc'): + from celery.backends import cache + cache._imp = [None] + self.assertEqual(cache.get_best_memcache()[0]().__module__, + 'memcache') + + def test_no_implementations(self): + with mask_modules('pylibmc', 'memcache'): + with reset_modules('celery.backends.cache'): + from celery.backends import cache + cache._imp = [None] + with self.assertRaises(ImproperlyConfigured): + cache.get_best_memcache() + + def test_cached(self): + with self.mock_pylibmc(): + with reset_modules('celery.backends.cache'): + from celery.backends import cache + cache._imp = [None] + cache.get_best_memcache()[0](behaviors={'foo': 'bar'}) + self.assertTrue(cache._imp[0]) + cache.get_best_memcache()[0]() + + def test_backends(self): + from celery.backends.cache import backends + with self.mock_memcache(): + for name, fun in items(backends): + self.assertTrue(fun()) + + +class test_memcache_key(AppCase, MockCacheMixin): + + def test_memcache_unicode_key(self): + with self.mock_memcache(): + with reset_modules('celery.backends.cache'): + with mask_modules('pylibmc'): + from celery.backends import cache + cache._imp = [None] + task_id, result = string(uuid()), 42 + b = cache.CacheBackend(backend='memcache', app=self.app) + b.store_result(task_id, result, status=states.SUCCESS) + self.assertEqual(b.get_result(task_id), result) + + def test_memcache_bytes_key(self): + with self.mock_memcache(): + with 
reset_modules('celery.backends.cache'): + with mask_modules('pylibmc'): + from celery.backends import cache + cache._imp = [None] + task_id, result = str_to_bytes(uuid()), 42 + b = cache.CacheBackend(backend='memcache', app=self.app) + b.store_result(task_id, result, status=states.SUCCESS) + self.assertEqual(b.get_result(task_id), result) + + def test_pylibmc_unicode_key(self): + with reset_modules('celery.backends.cache'): + with self.mock_pylibmc(): + from celery.backends import cache + cache._imp = [None] + task_id, result = string(uuid()), 42 + b = cache.CacheBackend(backend='memcache', app=self.app) + b.store_result(task_id, result, status=states.SUCCESS) + self.assertEqual(b.get_result(task_id), result) + + def test_pylibmc_bytes_key(self): + with reset_modules('celery.backends.cache'): + with self.mock_pylibmc(): + from celery.backends import cache + cache._imp = [None] + task_id, result = str_to_bytes(uuid()), 42 + b = cache.CacheBackend(backend='memcache', app=self.app) + b.store_result(task_id, result, status=states.SUCCESS) + self.assertEqual(b.get_result(task_id), result) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_cassandra.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_cassandra.py new file mode 100644 index 0000000..1a43be9 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_cassandra.py @@ -0,0 +1,190 @@ +from __future__ import absolute_import + +import socket + +from pickle import loads, dumps + +from celery import states +from celery.exceptions import ImproperlyConfigured +from celery.tests.case import ( + AppCase, Mock, mock_module, depends_on_current_app, +) + + +class Object(object): + pass + + +def install_exceptions(mod): + # py3k: cannot catch exceptions not inheriting from BaseException.
+ + class NotFoundException(Exception): + pass + + class TException(Exception): + pass + + class InvalidRequestException(Exception): + pass + + class UnavailableException(Exception): + pass + + class TimedOutException(Exception): + pass + + class AllServersUnavailable(Exception): + pass + + mod.NotFoundException = NotFoundException + mod.TException = TException + mod.InvalidRequestException = InvalidRequestException + mod.TimedOutException = TimedOutException + mod.UnavailableException = UnavailableException + mod.AllServersUnavailable = AllServersUnavailable + + +class test_CassandraBackend(AppCase): + + def setup(self): + self.app.conf.update( + CASSANDRA_SERVERS=['example.com'], + CASSANDRA_KEYSPACE='keyspace', + CASSANDRA_COLUMN_FAMILY='columns', + ) + + def test_init_no_pycassa(self): + with mock_module('pycassa'): + from celery.backends import cassandra as mod + prev, mod.pycassa = mod.pycassa, None + try: + with self.assertRaises(ImproperlyConfigured): + mod.CassandraBackend(app=self.app) + finally: + mod.pycassa = prev + + def test_init_with_and_without_LOCAL_QUROM(self): + with mock_module('pycassa'): + from celery.backends import cassandra as mod + mod.pycassa = Mock() + install_exceptions(mod.pycassa) + cons = mod.pycassa.ConsistencyLevel = Object() + cons.LOCAL_QUORUM = 'foo' + + self.app.conf.CASSANDRA_READ_CONSISTENCY = 'LOCAL_FOO' + self.app.conf.CASSANDRA_WRITE_CONSISTENCY = 'LOCAL_FOO' + + mod.CassandraBackend(app=self.app) + cons.LOCAL_FOO = 'bar' + mod.CassandraBackend(app=self.app) + + # no servers raises ImproperlyConfigured + with self.assertRaises(ImproperlyConfigured): + self.app.conf.CASSANDRA_SERVERS = None + mod.CassandraBackend( + app=self.app, keyspace='b', column_family='c', + ) + + @depends_on_current_app + def test_reduce(self): + with mock_module('pycassa'): + from celery.backends.cassandra import CassandraBackend + self.assertTrue(loads(dumps(CassandraBackend(app=self.app)))) + + def test_get_task_meta_for(self): + with mock_module('pycassa'): + from celery.backends import cassandra as mod + mod.pycassa = Mock() + install_exceptions(mod.pycassa) + mod.Thrift = Mock() + install_exceptions(mod.Thrift) + x = mod.CassandraBackend(app=self.app) + Get_Column = x._get_column_family = Mock() + get_column = Get_Column.return_value = Mock() + get = get_column.get + META = get.return_value = { + 'task_id': 'task_id', + 'status': states.SUCCESS, + 'result': '1', + 'date_done': 'date', + 'traceback': '', + 'children': None, + } + x.decode = Mock() + x.detailed_mode = False + meta = x._get_task_meta_for('task_id') + self.assertEqual(meta['status'], states.SUCCESS) + + x.detailed_mode = True + row = get.return_value = Mock() + row.values.return_value = [Mock()] + x.decode.return_value = META + meta = x._get_task_meta_for('task_id') + self.assertEqual(meta['status'], states.SUCCESS) + x.decode.return_value = Mock() + + x.detailed_mode = False + get.side_effect = KeyError() + meta = x._get_task_meta_for('task_id') + self.assertEqual(meta['status'], states.PENDING) + + calls = [0] + end = [10] + + def work_eventually(*arg): + try: + if calls[0] > end[0]: + return META + raise socket.error() + finally: + calls[0] += 1 + get.side_effect = work_eventually + x._retry_timeout = 10 + x._retry_wait = 0.01 + meta = x._get_task_meta_for('task') + self.assertEqual(meta['status'], states.SUCCESS) + + x._retry_timeout = 0.1 + calls[0], end[0] = 0, 100 + with self.assertRaises(socket.error): + x._get_task_meta_for('task') + + def test_store_result(self): + with mock_module('pycassa'): + 
from celery.backends import cassandra as mod + mod.pycassa = Mock() + install_exceptions(mod.pycassa) + mod.Thrift = Mock() + install_exceptions(mod.Thrift) + x = mod.CassandraBackend(app=self.app) + Get_Column = x._get_column_family = Mock() + cf = Get_Column.return_value = Mock() + x.detailed_mode = False + x._store_result('task_id', 'result', states.SUCCESS) + self.assertTrue(cf.insert.called) + + cf.insert.reset() + x.detailed_mode = True + x._store_result('task_id', 'result', states.SUCCESS) + self.assertTrue(cf.insert.called) + + def test_process_cleanup(self): + with mock_module('pycassa'): + from celery.backends import cassandra as mod + x = mod.CassandraBackend(app=self.app) + x._column_family = None + x.process_cleanup() + + x._column_family = True + x.process_cleanup() + self.assertIsNone(x._column_family) + + def test_get_column_family(self): + with mock_module('pycassa'): + from celery.backends import cassandra as mod + mod.pycassa = Mock() + install_exceptions(mod.pycassa) + x = mod.CassandraBackend(app=self.app) + self.assertTrue(x._get_column_family()) + self.assertIsNotNone(x._column_family) + self.assertIs(x._get_column_family(), x._column_family) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_couchbase.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_couchbase.py new file mode 100644 index 0000000..3dc6aad --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_couchbase.py @@ -0,0 +1,136 @@ +from __future__ import absolute_import + +from celery.backends import couchbase as module +from celery.backends.couchbase import CouchBaseBackend +from celery.exceptions import ImproperlyConfigured +from celery import backends +from celery.tests.case import ( + AppCase, MagicMock, Mock, SkipTest, patch, sentinel, +) + +try: + import couchbase +except ImportError: + couchbase = None # noqa + +COUCHBASE_BUCKET = 'celery_bucket' + + +class test_CouchBaseBackend(AppCase): + + def setup(self): + if couchbase is None: + raise SkipTest('couchbase is not installed.') + self.backend = CouchBaseBackend(app=self.app) + + def test_init_no_couchbase(self): + """test init no couchbase raises""" + prev, module.couchbase = module.couchbase, None + try: + with self.assertRaises(ImproperlyConfigured): + CouchBaseBackend(app=self.app) + finally: + module.couchbase = prev + + def test_init_no_settings(self): + """test init no settings""" + self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = [] + with self.assertRaises(ImproperlyConfigured): + CouchBaseBackend(app=self.app) + + def test_init_settings_is_None(self): + """Test init settings is None""" + self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = None + CouchBaseBackend(app=self.app) + + def test_get_connection_connection_exists(self): + with patch('couchbase.connection.Connection') as mock_Connection: + self.backend._connection = sentinel._connection + + connection = self.backend._get_connection() + + self.assertEqual(sentinel._connection, connection) + self.assertFalse(mock_Connection.called) + + def test_get(self): + """test_get + + CouchBaseBackend.get should return and take two params + db conn to couchbase is mocked. 
+ TODO Should test on key not exists + + """ + self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = {} + x = CouchBaseBackend(app=self.app) + x._connection = Mock() + mocked_get = x._connection.get = Mock() + mocked_get.return_value.value = sentinel.retval + # should return None + self.assertEqual(x.get('1f3fab'), sentinel.retval) + x._connection.get.assert_called_once_with('1f3fab') + + def test_set(self): + """test_set + + CouchBaseBackend.set should return None and take two params + db conn to couchbase is mocked. + + """ + self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = None + x = CouchBaseBackend(app=self.app) + x._connection = MagicMock() + x._connection.set = MagicMock() + # should return None + self.assertIsNone(x.set(sentinel.key, sentinel.value)) + + def test_delete(self): + """test_delete + + CouchBaseBackend.delete should return and take two params + db conn to couchbase is mocked. + TODO Should test on key not exists + + """ + self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = {} + x = CouchBaseBackend(app=self.app) + x._connection = Mock() + mocked_delete = x._connection.delete = Mock() + mocked_delete.return_value = None + # should return None + self.assertIsNone(x.delete('1f3fab')) + x._connection.delete.assert_called_once_with('1f3fab') + + def test_config_params(self): + """test_config_params + + celery.conf.CELERY_COUCHBASE_BACKEND_SETTINGS is properly set + """ + self.app.conf.CELERY_COUCHBASE_BACKEND_SETTINGS = { + 'bucket': 'mycoolbucket', + 'host': ['here.host.com', 'there.host.com'], + 'username': 'johndoe', + 'password': 'mysecret', + 'port': '1234', + } + x = CouchBaseBackend(app=self.app) + self.assertEqual(x.bucket, 'mycoolbucket') + self.assertEqual(x.host, ['here.host.com', 'there.host.com'],) + self.assertEqual(x.username, 'johndoe',) + self.assertEqual(x.password, 'mysecret') + self.assertEqual(x.port, 1234) + + def test_backend_by_url(self, url='couchbase://myhost/mycoolbucket'): + from celery.backends.couchbase import CouchBaseBackend + backend, url_ = backends.get_backend_by_url(url, self.app.loader) + self.assertIs(backend, CouchBaseBackend) + self.assertEqual(url_, url) + + def test_backend_params_by_url(self): + url = 'couchbase://johndoe:mysecret@myhost:123/mycoolbucket' + with self.Celery(backend=url) as app: + x = app.backend + self.assertEqual(x.bucket, 'mycoolbucket') + self.assertEqual(x.host, 'myhost') + self.assertEqual(x.username, 'johndoe') + self.assertEqual(x.password, 'mysecret') + self.assertEqual(x.port, 123) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_database.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_database.py new file mode 100644 index 0000000..6b5bf94 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_database.py @@ -0,0 +1,196 @@ +from __future__ import absolute_import, unicode_literals + +from datetime import datetime + +from pickle import loads, dumps + +from celery import states +from celery.exceptions import ImproperlyConfigured +from celery.utils import uuid + +from celery.tests.case import ( + AppCase, + SkipTest, + depends_on_current_app, + mask_modules, + skip_if_pypy, + skip_if_jython, +) + +try: + import sqlalchemy # noqa +except ImportError: + DatabaseBackend = Task = TaskSet = retry = None # noqa +else: + from celery.backends.database import DatabaseBackend, retry + from celery.backends.database.models import Task, TaskSet + + +class SomeClass(object): + + def __init__(self, data): + self.data = data + + +class 
test_DatabaseBackend(AppCase): + + @skip_if_pypy + @skip_if_jython + def setup(self): + if DatabaseBackend is None: + raise SkipTest('sqlalchemy not installed') + self.uri = 'sqlite:///test.db' + + def test_retry_helper(self): + from celery.backends.database import DatabaseError + + calls = [0] + + @retry + def raises(): + calls[0] += 1 + raise DatabaseError(1, 2, 3) + + with self.assertRaises(DatabaseError): + raises(max_retries=5) + self.assertEqual(calls[0], 5) + + def test_missing_SQLAlchemy_raises_ImproperlyConfigured(self): + with mask_modules('sqlalchemy'): + from celery.backends.database import _sqlalchemy_installed + with self.assertRaises(ImproperlyConfigured): + _sqlalchemy_installed() + + def test_missing_dburi_raises_ImproperlyConfigured(self): + self.app.conf.CELERY_RESULT_DBURI = None + with self.assertRaises(ImproperlyConfigured): + DatabaseBackend(app=self.app) + + def test_missing_task_id_is_PENDING(self): + tb = DatabaseBackend(self.uri, app=self.app) + self.assertEqual(tb.get_status('xxx-does-not-exist'), states.PENDING) + + def test_missing_task_meta_is_dict_with_pending(self): + tb = DatabaseBackend(self.uri, app=self.app) + self.assertDictContainsSubset({ + 'status': states.PENDING, + 'task_id': 'xxx-does-not-exist-at-all', + 'result': None, + 'traceback': None + }, tb.get_task_meta('xxx-does-not-exist-at-all')) + + def test_mark_as_done(self): + tb = DatabaseBackend(self.uri, app=self.app) + + tid = uuid() + + self.assertEqual(tb.get_status(tid), states.PENDING) + self.assertIsNone(tb.get_result(tid)) + + tb.mark_as_done(tid, 42) + self.assertEqual(tb.get_status(tid), states.SUCCESS) + self.assertEqual(tb.get_result(tid), 42) + + def test_is_pickled(self): + tb = DatabaseBackend(self.uri, app=self.app) + + tid2 = uuid() + result = {'foo': 'baz', 'bar': SomeClass(12345)} + tb.mark_as_done(tid2, result) + # is serialized properly. 
+ rindb = tb.get_result(tid2) + self.assertEqual(rindb.get('foo'), 'baz') + self.assertEqual(rindb.get('bar').data, 12345) + + def test_mark_as_started(self): + tb = DatabaseBackend(self.uri, app=self.app) + tid = uuid() + tb.mark_as_started(tid) + self.assertEqual(tb.get_status(tid), states.STARTED) + + def test_mark_as_revoked(self): + tb = DatabaseBackend(self.uri, app=self.app) + tid = uuid() + tb.mark_as_revoked(tid) + self.assertEqual(tb.get_status(tid), states.REVOKED) + + def test_mark_as_retry(self): + tb = DatabaseBackend(self.uri, app=self.app) + tid = uuid() + try: + raise KeyError('foo') + except KeyError as exception: + import traceback + trace = '\n'.join(traceback.format_stack()) + tb.mark_as_retry(tid, exception, traceback=trace) + self.assertEqual(tb.get_status(tid), states.RETRY) + self.assertIsInstance(tb.get_result(tid), KeyError) + self.assertEqual(tb.get_traceback(tid), trace) + + def test_mark_as_failure(self): + tb = DatabaseBackend(self.uri, app=self.app) + + tid3 = uuid() + try: + raise KeyError('foo') + except KeyError as exception: + import traceback + trace = '\n'.join(traceback.format_stack()) + tb.mark_as_failure(tid3, exception, traceback=trace) + self.assertEqual(tb.get_status(tid3), states.FAILURE) + self.assertIsInstance(tb.get_result(tid3), KeyError) + self.assertEqual(tb.get_traceback(tid3), trace) + + def test_forget(self): + tb = DatabaseBackend(self.uri, backend='memory://', app=self.app) + tid = uuid() + tb.mark_as_done(tid, {'foo': 'bar'}) + tb.mark_as_done(tid, {'foo': 'bar'}) + x = self.app.AsyncResult(tid, backend=tb) + x.forget() + self.assertIsNone(x.result) + + def test_process_cleanup(self): + tb = DatabaseBackend(self.uri, app=self.app) + tb.process_cleanup() + + @depends_on_current_app + def test_reduce(self): + tb = DatabaseBackend(self.uri, app=self.app) + self.assertTrue(loads(dumps(tb))) + + def test_save__restore__delete_group(self): + tb = DatabaseBackend(self.uri, app=self.app) + + tid = uuid() + res = {'something': 'special'} + self.assertEqual(tb.save_group(tid, res), res) + + res2 = tb.restore_group(tid) + self.assertEqual(res2, res) + + tb.delete_group(tid) + self.assertIsNone(tb.restore_group(tid)) + + self.assertIsNone(tb.restore_group('xxx-nonexisting-id')) + + def test_cleanup(self): + tb = DatabaseBackend(self.uri, app=self.app) + for i in range(10): + tb.mark_as_done(uuid(), 42) + tb.save_group(uuid(), {'foo': 'bar'}) + s = tb.ResultSession() + for t in s.query(Task).all(): + t.date_done = datetime.now() - tb.expires * 2 + for t in s.query(TaskSet).all(): + t.date_done = datetime.now() - tb.expires * 2 + s.commit() + s.close() + + tb.cleanup() + + def test_Task__repr__(self): + self.assertIn('foo', repr(Task('foo'))) + + def test_TaskSet__repr__(self): + self.assertIn('foo', repr(TaskSet('foo', None))) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_mongodb.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_mongodb.py new file mode 100644 index 0000000..bce429f --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_mongodb.py @@ -0,0 +1,366 @@ +from __future__ import absolute_import + +import datetime +import uuid + +from pickle import loads, dumps + +from celery import states +from celery.backends import mongodb as module +from celery.backends.mongodb import MongoBackend, pymongo +from celery.exceptions import ImproperlyConfigured +from celery.tests.case import ( + AppCase, MagicMock, Mock, SkipTest, ANY, + depends_on_current_app, 
disable_stdouts, patch, sentinel, +) + +COLLECTION = 'taskmeta_celery' +TASK_ID = str(uuid.uuid1()) +MONGODB_HOST = 'localhost' +MONGODB_PORT = 27017 +MONGODB_USER = 'mongo' +MONGODB_PASSWORD = '1234' +MONGODB_DATABASE = 'testing' +MONGODB_COLLECTION = 'collection1' + + +class test_MongoBackend(AppCase): + + default_url = 'mongodb://uuuu:pwpw@hostname.dom/database' + replica_set_url = ( + 'mongodb://uuuu:pwpw@hostname.dom,' + 'hostname.dom/database?replicaSet=rs' + ) + sanitized_default_url = 'mongodb://uuuu:**@hostname.dom/database' + sanitized_replica_set_url = ( + 'mongodb://uuuu:**@hostname.dom/,' + 'hostname.dom/database?replicaSet=rs' + ) + + def setup(self): + if pymongo is None: + raise SkipTest('pymongo is not installed.') + + R = self._reset = {} + R['encode'], MongoBackend.encode = MongoBackend.encode, Mock() + R['decode'], MongoBackend.decode = MongoBackend.decode, Mock() + R['Binary'], module.Binary = module.Binary, Mock() + R['datetime'], datetime.datetime = datetime.datetime, Mock() + + self.backend = MongoBackend(app=self.app, url=self.default_url) + + def teardown(self): + MongoBackend.encode = self._reset['encode'] + MongoBackend.decode = self._reset['decode'] + module.Binary = self._reset['Binary'] + datetime.datetime = self._reset['datetime'] + + def test_init_no_mongodb(self): + prev, module.pymongo = module.pymongo, None + try: + with self.assertRaises(ImproperlyConfigured): + MongoBackend(app=self.app) + finally: + module.pymongo = prev + + def test_init_no_settings(self): + self.app.conf.CELERY_MONGODB_BACKEND_SETTINGS = [] + with self.assertRaises(ImproperlyConfigured): + MongoBackend(app=self.app) + + def test_init_settings_is_None(self): + self.app.conf.CELERY_MONGODB_BACKEND_SETTINGS = None + MongoBackend(app=self.app) + + def test_restore_group_no_entry(self): + x = MongoBackend(app=self.app) + x.collection = Mock() + fo = x.collection.find_one = Mock() + fo.return_value = None + self.assertIsNone(x._restore_group('1f3fab')) + + @depends_on_current_app + def test_reduce(self): + x = MongoBackend(app=self.app) + self.assertTrue(loads(dumps(x))) + + def test_get_connection_connection_exists(self): + + with patch('pymongo.MongoClient') as mock_Connection: + self.backend._connection = sentinel._connection + + connection = self.backend._get_connection() + + self.assertEqual(sentinel._connection, connection) + self.assertFalse(mock_Connection.called) + + def test_get_connection_no_connection_host(self): + + with patch('pymongo.MongoClient') as mock_Connection: + self.backend._connection = None + self.backend.host = MONGODB_HOST + self.backend.port = MONGODB_PORT + mock_Connection.return_value = sentinel.connection + + connection = self.backend._get_connection() + mock_Connection.assert_called_once_with( + host='mongodb://localhost:27017', + **self.backend._prepare_client_options() + ) + self.assertEqual(sentinel.connection, connection) + + def test_get_connection_no_connection_mongodb_uri(self): + + with patch('pymongo.MongoClient') as mock_Connection: + mongodb_uri = 'mongodb://%s:%d' % (MONGODB_HOST, MONGODB_PORT) + self.backend._connection = None + self.backend.host = mongodb_uri + + mock_Connection.return_value = sentinel.connection + + connection = self.backend._get_connection() + mock_Connection.assert_called_once_with( + host=mongodb_uri, **self.backend._prepare_client_options() + ) + self.assertEqual(sentinel.connection, connection) + + @patch('celery.backends.mongodb.MongoBackend._get_connection') + def test_get_database_no_existing(self, 
mock_get_connection): + # Should really check for combinations of these two, to be complete. + self.backend.user = MONGODB_USER + self.backend.password = MONGODB_PASSWORD + + mock_database = Mock() + mock_connection = MagicMock(spec=['__getitem__']) + mock_connection.__getitem__.return_value = mock_database + mock_get_connection.return_value = mock_connection + + database = self.backend.database + + self.assertTrue(database is mock_database) + self.assertTrue(self.backend.__dict__['database'] is mock_database) + mock_database.authenticate.assert_called_once_with( + MONGODB_USER, MONGODB_PASSWORD) + + @patch('celery.backends.mongodb.MongoBackend._get_connection') + def test_get_database_no_existing_no_auth(self, mock_get_connection): + # Should really check for combinations of these two, to be complete. + self.backend.user = None + self.backend.password = None + + mock_database = Mock() + mock_connection = MagicMock(spec=['__getitem__']) + mock_connection.__getitem__.return_value = mock_database + mock_get_connection.return_value = mock_connection + + database = self.backend.database + + self.assertTrue(database is mock_database) + self.assertFalse(mock_database.authenticate.called) + self.assertTrue(self.backend.__dict__['database'] is mock_database) + + def test_process_cleanup(self): + self.backend._connection = None + self.backend.process_cleanup() + self.assertEqual(self.backend._connection, None) + + self.backend._connection = 'not none' + self.backend.process_cleanup() + self.assertEqual(self.backend._connection, None) + + @patch('celery.backends.mongodb.MongoBackend._get_database') + def test_store_result(self, mock_get_database): + self.backend.taskmeta_collection = MONGODB_COLLECTION + + mock_database = MagicMock(spec=['__getitem__', '__setitem__']) + mock_collection = Mock() + + mock_get_database.return_value = mock_database + mock_database.__getitem__.return_value = mock_collection + + ret_val = self.backend._store_result( + sentinel.task_id, sentinel.result, sentinel.status) + + mock_get_database.assert_called_once_with() + mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) + mock_collection.save.assert_called_once_with(ANY) + self.assertEqual(sentinel.result, ret_val) + + @patch('celery.backends.mongodb.MongoBackend._get_database') + def test_get_task_meta_for(self, mock_get_database): + datetime.datetime = self._reset['datetime'] + self.backend.taskmeta_collection = MONGODB_COLLECTION + + mock_database = MagicMock(spec=['__getitem__', '__setitem__']) + mock_collection = Mock() + mock_collection.find_one.return_value = MagicMock() + + mock_get_database.return_value = mock_database + mock_database.__getitem__.return_value = mock_collection + + ret_val = self.backend._get_task_meta_for(sentinel.task_id) + + mock_get_database.assert_called_once_with() + mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) + self.assertEqual( + list(sorted(['status', 'task_id', 'date_done', 'traceback', + 'result', 'children'])), + list(sorted(ret_val.keys())), + ) + + @patch('celery.backends.mongodb.MongoBackend._get_database') + def test_get_task_meta_for_no_result(self, mock_get_database): + self.backend.taskmeta_collection = MONGODB_COLLECTION + + mock_database = MagicMock(spec=['__getitem__', '__setitem__']) + mock_collection = Mock() + mock_collection.find_one.return_value = None + + mock_get_database.return_value = mock_database + mock_database.__getitem__.return_value = mock_collection + + ret_val = self.backend._get_task_meta_for(sentinel.task_id) + + 
mock_get_database.assert_called_once_with() + mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) + self.assertEqual({'status': states.PENDING, 'result': None}, ret_val) + + @patch('celery.backends.mongodb.MongoBackend._get_database') + def test_save_group(self, mock_get_database): + self.backend.taskmeta_collection = MONGODB_COLLECTION + + mock_database = MagicMock(spec=['__getitem__', '__setitem__']) + mock_collection = Mock() + + mock_get_database.return_value = mock_database + mock_database.__getitem__.return_value = mock_collection + + ret_val = self.backend._save_group( + sentinel.taskset_id, sentinel.result) + + mock_get_database.assert_called_once_with() + mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) + mock_collection.save.assert_called_once_with(ANY) + self.assertEqual(sentinel.result, ret_val) + + @patch('celery.backends.mongodb.MongoBackend._get_database') + def test_restore_group(self, mock_get_database): + self.backend.taskmeta_collection = MONGODB_COLLECTION + + mock_database = MagicMock(spec=['__getitem__', '__setitem__']) + mock_collection = Mock() + mock_collection.find_one.return_value = MagicMock() + + mock_get_database.return_value = mock_database + mock_database.__getitem__.return_value = mock_collection + + ret_val = self.backend._restore_group(sentinel.taskset_id) + + mock_get_database.assert_called_once_with() + mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) + mock_collection.find_one.assert_called_once_with( + {'_id': sentinel.taskset_id}) + self.assertItemsEqual( + ['date_done', 'result', 'task_id'], + list(ret_val.keys()), + ) + + @patch('celery.backends.mongodb.MongoBackend._get_database') + def test_delete_group(self, mock_get_database): + self.backend.taskmeta_collection = MONGODB_COLLECTION + + mock_database = MagicMock(spec=['__getitem__', '__setitem__']) + mock_collection = Mock() + + mock_get_database.return_value = mock_database + mock_database.__getitem__.return_value = mock_collection + + self.backend._delete_group(sentinel.taskset_id) + + mock_get_database.assert_called_once_with() + mock_database.__getitem__.assert_called_once_with(MONGODB_COLLECTION) + mock_collection.remove.assert_called_once_with( + {'_id': sentinel.taskset_id}) + + @patch('celery.backends.mongodb.MongoBackend._get_database') + def test_forget(self, mock_get_database): + self.backend.taskmeta_collection = MONGODB_COLLECTION + + mock_database = MagicMock(spec=['__getitem__', '__setitem__']) + mock_collection = Mock() + + mock_get_database.return_value = mock_database + mock_database.__getitem__.return_value = mock_collection + + self.backend._forget(sentinel.task_id) + + mock_get_database.assert_called_once_with() + mock_database.__getitem__.assert_called_once_with( + MONGODB_COLLECTION) + mock_collection.remove.assert_called_once_with( + {'_id': sentinel.task_id}) + + @patch('celery.backends.mongodb.MongoBackend._get_database') + def test_cleanup(self, mock_get_database): + datetime.datetime = self._reset['datetime'] + self.backend.taskmeta_collection = MONGODB_COLLECTION + + mock_database = MagicMock(spec=['__getitem__', '__setitem__']) + self.backend.collections = mock_collection = Mock() + + mock_get_database.return_value = mock_database + mock_database.__getitem__.return_value = mock_collection + + self.backend.app.now = datetime.datetime.utcnow + self.backend.cleanup() + + mock_get_database.assert_called_once_with() + mock_database.__getitem__.assert_called_once_with( + MONGODB_COLLECTION) + 
self.assertTrue(mock_collection.remove.called) + + def test_get_database_authfailure(self): + x = MongoBackend(app=self.app) + x._get_connection = Mock() + conn = x._get_connection.return_value = {} + db = conn[x.database_name] = Mock() + db.authenticate.return_value = False + x.user = 'jerry' + x.password = 'cere4l' + with self.assertRaises(ImproperlyConfigured): + x._get_database() + db.authenticate.assert_called_with('jerry', 'cere4l') + + @patch('celery.backends.mongodb.detect_environment') + def test_prepare_client_options_for_ver_2(self, m_detect_env): + m_detect_env.return_value = 'default' + with patch('pymongo.version_tuple', new=(2, 6, 3)): + options = self.backend._prepare_client_options() + self.assertDictEqual(options, { + 'max_pool_size': self.backend.max_pool_size, + 'auto_start_request': False + }) + + def test_as_uri_include_password(self): + self.assertEqual(self.backend.as_uri(True), self.default_url) + + def test_as_uri_exclude_password(self): + self.assertEqual(self.backend.as_uri(), self.sanitized_default_url) + + def test_as_uri_include_password_replica_set(self): + backend = MongoBackend(app=self.app, url=self.replica_set_url) + self.assertEqual(backend.as_uri(True), self.replica_set_url) + + def test_as_uri_exclude_password_replica_set(self): + backend = MongoBackend(app=self.app, url=self.replica_set_url) + self.assertEqual(backend.as_uri(), self.sanitized_replica_set_url) + + @disable_stdouts + def test_regression_worker_startup_info(self): + self.app.conf.result_backend = ( + 'mongodb://user:password@host0.com:43437,host1.com:43437' + '/work4us?replicaSet=rs&ssl=true' + ) + worker = self.app.Worker() + worker.on_start() + self.assertTrue(worker.startup_info()) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_redis.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_redis.py new file mode 100644 index 0000000..a0de4b7 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_redis.py @@ -0,0 +1,282 @@ +from __future__ import absolute_import + +from datetime import timedelta + +from pickle import loads, dumps + +from celery import signature +from celery import states +from celery import group +from celery import uuid +from celery.datastructures import AttributeDict +from celery.exceptions import ImproperlyConfigured +from celery.utils.timeutils import timedelta_seconds + +from celery.tests.case import ( + AppCase, Mock, MockCallbacks, SkipTest, depends_on_current_app, patch, +) + + +class Connection(object): + connected = True + + def disconnect(self): + self.connected = False + + +class Pipeline(object): + + def __init__(self, client): + self.client = client + self.steps = [] + + def __getattr__(self, attr): + + def add_step(*args, **kwargs): + self.steps.append((getattr(self.client, attr), args, kwargs)) + return self + return add_step + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + pass + + def execute(self): + return [step(*a, **kw) for step, a, kw in self.steps] + + +class Redis(MockCallbacks): + Connection = Connection + Pipeline = Pipeline + + def __init__(self, host=None, port=None, db=None, password=None, **kw): + self.host = host + self.port = port + self.db = db + self.password = password + self.keyspace = {} + self.expiry = {} + self.connection = self.Connection() + + def get(self, key): + return self.keyspace.get(key) + + def setex(self, key, value, expires): + self.set(key, value) + self.expire(key, expires) + + def set(self, key, 
value): + self.keyspace[key] = value + + def expire(self, key, expires): + self.expiry[key] = expires + return expires + + def delete(self, key): + return bool(self.keyspace.pop(key, None)) + + def pipeline(self): + return self.Pipeline(self) + + def _get_list(self, key): + try: + return self.keyspace[key] + except KeyError: + l = self.keyspace[key] = [] + return l + + def rpush(self, key, value): + self._get_list(key).append(value) + + def lrange(self, key, start, stop): + return self._get_list(key)[start:stop] + + def llen(self, key): + return len(self.keyspace.get(key) or []) + + +class redis(object): + VERSION = (2, 4, 10) + Redis = Redis + + class ConnectionPool(object): + + def __init__(self, **kwargs): + pass + + class UnixDomainSocketConnection(object): + + def __init__(self, **kwargs): + pass + + +class test_RedisBackend(AppCase): + + def get_backend(self): + from celery.backends.redis import RedisBackend + + class _RedisBackend(RedisBackend): + redis = redis + + return _RedisBackend + + def setup(self): + self.Backend = self.get_backend() + + @depends_on_current_app + def test_reduce(self): + try: + from celery.backends.redis import RedisBackend + x = RedisBackend(app=self.app, new_join=True) + self.assertTrue(loads(dumps(x))) + except ImportError: + raise SkipTest('redis not installed') + + def test_no_redis(self): + self.Backend.redis = None + with self.assertRaises(ImproperlyConfigured): + self.Backend(app=self.app, new_join=True) + + def test_url(self): + x = self.Backend( + 'redis://:bosco@vandelay.com:123//1', app=self.app, + new_join=True, + ) + self.assertTrue(x.connparams) + self.assertEqual(x.connparams['host'], 'vandelay.com') + self.assertEqual(x.connparams['db'], 1) + self.assertEqual(x.connparams['port'], 123) + self.assertEqual(x.connparams['password'], 'bosco') + + def test_socket_url(self): + x = self.Backend( + 'socket:///tmp/redis.sock?virtual_host=/3', app=self.app, + new_join=True, + ) + self.assertTrue(x.connparams) + self.assertEqual(x.connparams['path'], '/tmp/redis.sock') + self.assertIs( + x.connparams['connection_class'], + redis.UnixDomainSocketConnection, + ) + self.assertNotIn('host', x.connparams) + self.assertNotIn('port', x.connparams) + self.assertEqual(x.connparams['db'], 3) + + def test_compat_propertie(self): + x = self.Backend( + 'redis://:bosco@vandelay.com:123//1', app=self.app, + new_join=True, + ) + with self.assertPendingDeprecation(): + self.assertEqual(x.host, 'vandelay.com') + with self.assertPendingDeprecation(): + self.assertEqual(x.db, 1) + with self.assertPendingDeprecation(): + self.assertEqual(x.port, 123) + with self.assertPendingDeprecation(): + self.assertEqual(x.password, 'bosco') + + def test_conf_raises_KeyError(self): + self.app.conf = AttributeDict({ + 'CELERY_RESULT_SERIALIZER': 'json', + 'CELERY_MAX_CACHED_RESULTS': 1, + 'CELERY_ACCEPT_CONTENT': ['json'], + 'CELERY_TASK_RESULT_EXPIRES': None, + }) + self.Backend(app=self.app, new_join=True) + + def test_expires_defaults_to_config(self): + self.app.conf.CELERY_TASK_RESULT_EXPIRES = 10 + b = self.Backend(expires=None, app=self.app, new_join=True) + self.assertEqual(b.expires, 10) + + def test_expires_is_int(self): + b = self.Backend(expires=48, app=self.app, new_join=True) + self.assertEqual(b.expires, 48) + + def test_set_new_join_from_url_query(self): + b = self.Backend('redis://?new_join=True;foobar=1', app=self.app) + self.assertEqual(b.on_chord_part_return, b._new_chord_return) + self.assertEqual(b.apply_chord, b._new_chord_apply) + + def 
test_default_is_old_join(self): + b = self.Backend(app=self.app) + self.assertNotEqual(b.on_chord_part_return, b._new_chord_return) + self.assertNotEqual(b.apply_chord, b._new_chord_apply) + + def test_expires_is_None(self): + b = self.Backend(expires=None, app=self.app, new_join=True) + self.assertEqual(b.expires, timedelta_seconds( + self.app.conf.CELERY_TASK_RESULT_EXPIRES)) + + def test_expires_is_timedelta(self): + b = self.Backend( + expires=timedelta(minutes=1), app=self.app, new_join=1, + ) + self.assertEqual(b.expires, 60) + + def test_apply_chord(self): + self.Backend(app=self.app, new_join=True).apply_chord( + group(app=self.app), (), 'group_id', {}, + result=[self.app.AsyncResult(x) for x in [1, 2, 3]], + ) + + def test_mget(self): + b = self.Backend(app=self.app, new_join=True) + self.assertTrue(b.mget(['a', 'b', 'c'])) + b.client.mget.assert_called_with(['a', 'b', 'c']) + + def test_set_no_expire(self): + b = self.Backend(app=self.app, new_join=True) + b.expires = None + b.set('foo', 'bar') + + @patch('celery.result.GroupResult.restore') + def test_on_chord_part_return(self, restore): + b = self.Backend(app=self.app, new_join=True) + + def create_task(): + tid = uuid() + task = Mock(name='task-{0}'.format(tid)) + task.name = 'foobarbaz' + self.app.tasks['foobarbaz'] = task + task.request.chord = signature(task) + task.request.id = tid + task.request.chord['chord_size'] = 10 + task.request.group = 'group_id' + return task + + tasks = [create_task() for i in range(10)] + + for i in range(10): + b.on_chord_part_return(tasks[i], states.SUCCESS, i) + self.assertTrue(b.client.rpush.call_count) + b.client.rpush.reset_mock() + self.assertTrue(b.client.lrange.call_count) + gkey = b.get_key_for_group('group_id', '.j') + b.client.delete.assert_called_with(gkey) + b.client.expire.assert_called_with(gkey, 86400) + + def test_process_cleanup(self): + self.Backend(app=self.app, new_join=True).process_cleanup() + + def test_get_set_forget(self): + b = self.Backend(app=self.app, new_join=True) + tid = uuid() + b.store_result(tid, 42, states.SUCCESS) + self.assertEqual(b.get_status(tid), states.SUCCESS) + self.assertEqual(b.get_result(tid), 42) + b.forget(tid) + self.assertEqual(b.get_status(tid), states.PENDING) + + def test_set_expires(self): + b = self.Backend(expires=512, app=self.app, new_join=True) + tid = uuid() + key = b.get_key_for_task(tid) + b.store_result(tid, 42, states.SUCCESS) + b.client.expire.assert_called_with( + key, 512, + ) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_rpc.py b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_rpc.py new file mode 100644 index 0000000..6fe594c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/backends/test_rpc.py @@ -0,0 +1,75 @@ +from __future__ import absolute_import + +from celery.backends.rpc import RPCBackend +from celery._state import _task_stack + +from celery.tests.case import AppCase, Mock, patch + + +class test_RPCBackend(AppCase): + + def setup(self): + self.b = RPCBackend(app=self.app) + + def test_oid(self): + oid = self.b.oid + oid2 = self.b.oid + self.assertEqual(oid, oid2) + self.assertEqual(oid, self.app.oid) + + def test_interface(self): + self.b.on_reply_declare('task_id') + + def test_destination_for(self): + req = Mock(name='request') + req.reply_to = 'reply_to' + req.correlation_id = 'corid' + self.assertTupleEqual( + self.b.destination_for('task_id', req), + ('reply_to', 'corid'), + ) + task = Mock() + _task_stack.push(task) + try: + 
task.request.reply_to = 'reply_to' + task.request.correlation_id = 'corid' + self.assertTupleEqual( + self.b.destination_for('task_id', None), + ('reply_to', 'corid'), + ) + finally: + _task_stack.pop() + + with self.assertRaises(RuntimeError): + self.b.destination_for('task_id', None) + + def test_binding(self): + queue = self.b.binding + self.assertEqual(queue.name, self.b.oid) + self.assertEqual(queue.exchange, self.b.exchange) + self.assertEqual(queue.routing_key, self.b.oid) + self.assertFalse(queue.durable) + self.assertFalse(queue.auto_delete) + + def test_many_bindings(self): + self.assertListEqual( + self.b._many_bindings(['a', 'b']), + [self.b.binding], + ) + + def test_create_binding(self): + self.assertEqual(self.b._create_binding('id'), self.b.binding) + + def test_on_task_call(self): + with patch('celery.backends.rpc.maybe_declare') as md: + with self.app.amqp.producer_pool.acquire() as prod: + self.b.on_task_call(prod, 'task_id'), + md.assert_called_with( + self.b.binding(prod.channel), + retry=True, + ) + + def test_create_exchange(self): + ex = self.b._create_exchange('name') + self.assertIsInstance(ex, self.b.Exchange) + self.assertEqual(ex.name, '') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/proj/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/proj/__init__.py new file mode 100644 index 0000000..ffe8fb0 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/proj/__init__.py @@ -0,0 +1,5 @@ +from __future__ import absolute_import + +from celery import Celery + +hello = Celery(set_as_current=False) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/proj/app.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/proj/app.py new file mode 100644 index 0000000..f1fb15e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/proj/app.py @@ -0,0 +1,5 @@ +from __future__ import absolute_import + +from celery import Celery + +app = Celery(set_as_current=False) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_amqp.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_amqp.py new file mode 100644 index 0000000..8840a9f --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_amqp.py @@ -0,0 +1,153 @@ +from __future__ import absolute_import + +from celery.bin.amqp import ( + AMQPAdmin, + AMQShell, + dump_message, + amqp, + main, +) + +from celery.tests.case import AppCase, Mock, WhateverIO, patch + + +class test_AMQShell(AppCase): + + def setup(self): + self.fh = WhateverIO() + self.adm = self.create_adm() + self.shell = AMQShell(connect=self.adm.connect, out=self.fh) + + def create_adm(self, *args, **kwargs): + return AMQPAdmin(app=self.app, out=self.fh, *args, **kwargs) + + def test_queue_declare(self): + self.shell.onecmd('queue.declare foo') + self.assertIn('ok', self.fh.getvalue()) + + def test_missing_command(self): + self.shell.onecmd('foo foo') + self.assertIn('unknown syntax', self.fh.getvalue()) + + def RV(self): + raise Exception(self.fh.getvalue()) + + def test_spec_format_response(self): + spec = self.shell.amqp['exchange.declare'] + self.assertEqual(spec.format_response(None), 'ok.') + self.assertEqual(spec.format_response('NO'), 'NO') + + def test_missing_namespace(self): + self.shell.onecmd('ns.cmd arg') + 
self.assertIn('unknown syntax', self.fh.getvalue()) + + def test_help(self): + self.shell.onecmd('help') + self.assertIn('Example:', self.fh.getvalue()) + + def test_help_command(self): + self.shell.onecmd('help queue.declare') + self.assertIn('passive:no', self.fh.getvalue()) + + def test_help_unknown_command(self): + self.shell.onecmd('help foo.baz') + self.assertIn('unknown syntax', self.fh.getvalue()) + + def test_onecmd_error(self): + self.shell.dispatch = Mock() + self.shell.dispatch.side_effect = MemoryError() + self.shell.say = Mock() + self.assertFalse(self.shell.needs_reconnect) + self.shell.onecmd('hello') + self.assertTrue(self.shell.say.called) + self.assertTrue(self.shell.needs_reconnect) + + def test_exit(self): + with self.assertRaises(SystemExit): + self.shell.onecmd('exit') + self.assertIn("don't leave!", self.fh.getvalue()) + + def test_note_silent(self): + self.shell.silent = True + self.shell.note('foo bar') + self.assertNotIn('foo bar', self.fh.getvalue()) + + def test_reconnect(self): + self.shell.onecmd('queue.declare foo') + self.shell.needs_reconnect = True + self.shell.onecmd('queue.delete foo') + + def test_completenames(self): + self.assertEqual( + self.shell.completenames('queue.dec'), + ['queue.declare'], + ) + self.assertEqual( + sorted(self.shell.completenames('declare')), + sorted(['queue.declare', 'exchange.declare']), + ) + + def test_empty_line(self): + self.shell.emptyline = Mock() + self.shell.default = Mock() + self.shell.onecmd('') + self.shell.emptyline.assert_called_with() + self.shell.onecmd('foo') + self.shell.default.assert_called_with('foo') + + def test_respond(self): + self.shell.respond({'foo': 'bar'}) + self.assertIn('foo', self.fh.getvalue()) + + def test_prompt(self): + self.assertTrue(self.shell.prompt) + + def test_no_returns(self): + self.shell.onecmd('queue.declare foo') + self.shell.onecmd('exchange.declare bar direct yes') + self.shell.onecmd('queue.bind foo bar baz') + self.shell.onecmd('basic.ack 1') + + def test_dump_message(self): + m = Mock() + m.body = 'the quick brown fox' + m.properties = {'a': 1} + m.delivery_info = {'exchange': 'bar'} + self.assertTrue(dump_message(m)) + + def test_dump_message_no_message(self): + self.assertIn('No messages in queue', dump_message(None)) + + def test_note(self): + self.adm.silent = True + self.adm.note('FOO') + self.assertNotIn('FOO', self.fh.getvalue()) + + def test_run(self): + a = self.create_adm('queue.declare foo') + a.run() + self.assertIn('ok', self.fh.getvalue()) + + def test_run_loop(self): + a = self.create_adm() + a.Shell = Mock() + shell = a.Shell.return_value = Mock() + shell.cmdloop = Mock() + a.run() + shell.cmdloop.assert_called_with() + + shell.cmdloop.side_effect = KeyboardInterrupt() + a.run() + self.assertIn('bibi', self.fh.getvalue()) + + @patch('celery.bin.amqp.amqp') + def test_main(self, Command): + c = Command.return_value = Mock() + main() + c.execute_from_commandline.assert_called_with() + + @patch('celery.bin.amqp.AMQPAdmin') + def test_command(self, cls): + x = amqp(app=self.app) + x.run() + self.assertIs(cls.call_args[1]['app'], self.app) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_base.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_base.py new file mode 100644 index 0000000..61d56fe --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_base.py @@ -0,0 +1,332 @@ +from __future__ import absolute_import + +import os + +from celery.bin.base import ( + Command, + Option, + Extensions, + 
HelpFormatter, +) +from celery.tests.case import ( + AppCase, Mock, depends_on_current_app, override_stdouts, patch, +) + + +class Object(object): + pass + + +class MyApp(object): + user_options = {'preload': None} + +APP = MyApp() # <-- Used by test_with_custom_app + + +class MockCommand(Command): + mock_args = ('arg1', 'arg2', 'arg3') + + def parse_options(self, prog_name, arguments, command=None): + options = Object() + options.foo = 'bar' + options.prog_name = prog_name + return options, self.mock_args + + def run(self, *args, **kwargs): + return args, kwargs + + +class test_Extensions(AppCase): + + def test_load(self): + with patch('pkg_resources.iter_entry_points') as iterep: + with patch('celery.bin.base.symbol_by_name') as symbyname: + ep = Mock() + ep.name = 'ep' + ep.module_name = 'foo' + ep.attrs = ['bar', 'baz'] + iterep.return_value = [ep] + cls = symbyname.return_value = Mock() + register = Mock() + e = Extensions('unit', register) + e.load() + symbyname.assert_called_with('foo:bar') + register.assert_called_with(cls, name='ep') + + with patch('celery.bin.base.symbol_by_name') as symbyname: + symbyname.side_effect = SyntaxError() + with patch('warnings.warn') as warn: + e.load() + self.assertTrue(warn.called) + + with patch('celery.bin.base.symbol_by_name') as symbyname: + symbyname.side_effect = KeyError('foo') + with self.assertRaises(KeyError): + e.load() + + +class test_HelpFormatter(AppCase): + + def test_format_epilog(self): + f = HelpFormatter() + self.assertTrue(f.format_epilog('hello')) + self.assertFalse(f.format_epilog('')) + + def test_format_description(self): + f = HelpFormatter() + self.assertTrue(f.format_description('hello')) + + +class test_Command(AppCase): + + def test_get_options(self): + cmd = Command() + cmd.option_list = (1, 2, 3) + self.assertTupleEqual(cmd.get_options(), (1, 2, 3)) + + def test_custom_description(self): + + class C(Command): + description = 'foo' + + c = C() + self.assertEqual(c.description, 'foo') + + def test_register_callbacks(self): + c = Command(on_error=8, on_usage_error=9) + self.assertEqual(c.on_error, 8) + self.assertEqual(c.on_usage_error, 9) + + def test_run_raises_UsageError(self): + cb = Mock() + c = Command(on_usage_error=cb) + c.verify_args = Mock() + c.run = Mock() + exc = c.run.side_effect = c.UsageError('foo', status=3) + + self.assertEqual(c(), exc.status) + cb.assert_called_with(exc) + c.verify_args.assert_called_with(()) + + def test_default_on_usage_error(self): + cmd = Command() + cmd.handle_error = Mock() + exc = Exception() + cmd.on_usage_error(exc) + cmd.handle_error.assert_called_with(exc) + + def test_verify_args_missing(self): + c = Command() + + def run(a, b, c): + pass + c.run = run + + with self.assertRaises(c.UsageError): + c.verify_args((1, )) + c.verify_args((1, 2, 3)) + + def test_run_interface(self): + with self.assertRaises(NotImplementedError): + Command().run() + + @patch('sys.stdout') + def test_early_version(self, stdout): + cmd = Command() + with self.assertRaises(SystemExit): + cmd.early_version(['--version']) + + def test_execute_from_commandline(self): + cmd = MockCommand(app=self.app) + args1, kwargs1 = cmd.execute_from_commandline() # sys.argv + self.assertTupleEqual(args1, cmd.mock_args) + self.assertDictContainsSubset({'foo': 'bar'}, kwargs1) + self.assertTrue(kwargs1.get('prog_name')) + args2, kwargs2 = cmd.execute_from_commandline(['foo']) # pass list + self.assertTupleEqual(args2, cmd.mock_args) + self.assertDictContainsSubset({'foo': 'bar', 'prog_name': 'foo'}, + kwargs2) + + def 
test_with_bogus_args(self): + with override_stdouts() as (_, stderr): + cmd = MockCommand(app=self.app) + cmd.supports_args = False + with self.assertRaises(SystemExit): + cmd.execute_from_commandline(argv=['--bogus']) + self.assertTrue(stderr.getvalue()) + self.assertIn('Unrecognized', stderr.getvalue()) + + def test_with_custom_config_module(self): + prev = os.environ.pop('CELERY_CONFIG_MODULE', None) + try: + cmd = MockCommand(app=self.app) + cmd.setup_app_from_commandline(['--config=foo.bar.baz']) + self.assertEqual(os.environ.get('CELERY_CONFIG_MODULE'), + 'foo.bar.baz') + finally: + if prev: + os.environ['CELERY_CONFIG_MODULE'] = prev + else: + os.environ.pop('CELERY_CONFIG_MODULE', None) + + def test_with_custom_broker(self): + prev = os.environ.pop('CELERY_BROKER_URL', None) + try: + cmd = MockCommand(app=self.app) + cmd.setup_app_from_commandline(['--broker=xyzza://']) + self.assertEqual( + os.environ.get('CELERY_BROKER_URL'), 'xyzza://', + ) + finally: + if prev: + os.environ['CELERY_BROKER_URL'] = prev + else: + os.environ.pop('CELERY_BROKER_URL', None) + + def test_with_custom_app(self): + cmd = MockCommand(app=self.app) + app = '.'.join([__name__, 'APP']) + cmd.setup_app_from_commandline(['--app=%s' % (app, ), + '--loglevel=INFO']) + self.assertIs(cmd.app, APP) + cmd.setup_app_from_commandline(['-A', app, + '--loglevel=INFO']) + self.assertIs(cmd.app, APP) + + def test_setup_app_sets_quiet(self): + cmd = MockCommand(app=self.app) + cmd.setup_app_from_commandline(['-q']) + self.assertTrue(cmd.quiet) + cmd2 = MockCommand(app=self.app) + cmd2.setup_app_from_commandline(['--quiet']) + self.assertTrue(cmd2.quiet) + + def test_setup_app_sets_chdir(self): + with patch('os.chdir') as chdir: + cmd = MockCommand(app=self.app) + cmd.setup_app_from_commandline(['--workdir=/opt']) + chdir.assert_called_with('/opt') + + def test_setup_app_sets_loader(self): + prev = os.environ.get('CELERY_LOADER') + try: + cmd = MockCommand(app=self.app) + cmd.setup_app_from_commandline(['--loader=X.Y:Z']) + self.assertEqual(os.environ['CELERY_LOADER'], 'X.Y:Z') + finally: + if prev is not None: + os.environ['CELERY_LOADER'] = prev + + def test_setup_app_no_respect(self): + cmd = MockCommand(app=self.app) + cmd.respects_app_option = False + with patch('celery.bin.base.Celery') as cp: + cmd.setup_app_from_commandline(['--app=x.y:z']) + self.assertTrue(cp.called) + + def test_setup_app_custom_app(self): + cmd = MockCommand(app=self.app) + app = cmd.app = Mock() + app.user_options = {'preload': None} + cmd.setup_app_from_commandline([]) + self.assertEqual(cmd.app, app) + + def test_find_app_suspects(self): + cmd = MockCommand(app=self.app) + self.assertTrue(cmd.find_app('celery.tests.bin.proj.app')) + self.assertTrue(cmd.find_app('celery.tests.bin.proj')) + self.assertTrue(cmd.find_app('celery.tests.bin.proj:hello')) + self.assertTrue(cmd.find_app('celery.tests.bin.proj.app:app')) + + with self.assertRaises(AttributeError): + cmd.find_app(__name__) + + def test_host_format(self): + cmd = MockCommand(app=self.app) + with patch('socket.gethostname') as hn: + hn.return_value = 'blacktron.example.com' + self.assertEqual(cmd.host_format(''), '') + self.assertEqual( + cmd.host_format('celery@%h'), + 'celery@blacktron.example.com', + ) + self.assertEqual( + cmd.host_format('celery@%d'), + 'celery@example.com', + ) + self.assertEqual( + cmd.host_format('celery@%n'), + 'celery@blacktron', + ) + + def test_say_chat_quiet(self): + cmd = MockCommand(app=self.app) + cmd.quiet = True + self.assertIsNone(cmd.say_chat('<-', 
'foo', 'foo')) + + def test_say_chat_show_body(self): + cmd = MockCommand(app=self.app) + cmd.out = Mock() + cmd.show_body = True + cmd.say_chat('->', 'foo', 'body') + cmd.out.assert_called_with('body') + + def test_say_chat_no_body(self): + cmd = MockCommand(app=self.app) + cmd.out = Mock() + cmd.show_body = False + cmd.say_chat('->', 'foo', 'body') + + @depends_on_current_app + def test_with_cmdline_config(self): + cmd = MockCommand(app=self.app) + cmd.enable_config_from_cmdline = True + cmd.namespace = 'celeryd' + rest = cmd.setup_app_from_commandline(argv=[ + '--loglevel=INFO', '--', + 'broker.url=amqp://broker.example.com', + '.prefetch_multiplier=100']) + self.assertEqual(cmd.app.conf.BROKER_URL, + 'amqp://broker.example.com') + self.assertEqual(cmd.app.conf.CELERYD_PREFETCH_MULTIPLIER, 100) + self.assertListEqual(rest, ['--loglevel=INFO']) + + def test_find_app(self): + cmd = MockCommand(app=self.app) + with patch('celery.bin.base.symbol_by_name') as sbn: + from types import ModuleType + x = ModuleType('proj') + + def on_sbn(*args, **kwargs): + + def after(*args, **kwargs): + x.app = 'quick brown fox' + x.__path__ = None + return x + sbn.side_effect = after + return x + sbn.side_effect = on_sbn + x.__path__ = [True] + self.assertEqual(cmd.find_app('proj'), 'quick brown fox') + + def test_parse_preload_options_shortopt(self): + cmd = Command() + cmd.preload_options = (Option('-s', action='store', dest='silent'), ) + acc = cmd.parse_preload_options(['-s', 'yes']) + self.assertEqual(acc.get('silent'), 'yes') + + def test_parse_preload_options_with_equals_and_append(self): + cmd = Command() + opt = Option('--zoom', action='append', default=[]) + cmd.preload_options = (opt,) + acc = cmd.parse_preload_options(['--zoom=1', '--zoom=2']) + + self.assertEqual(acc, {'zoom': ['1', '2']}) + + def test_parse_preload_options_without_equals_and_append(self): + cmd = Command() + opt = Option('--zoom', action='append', default=[]) + cmd.preload_options = (opt,) + acc = cmd.parse_preload_options(['--zoom', '1', '--zoom', '2']) + + self.assertEqual(acc, {'zoom': ['1', '2']}) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_beat.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_beat.py new file mode 100644 index 0000000..45a7438 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_beat.py @@ -0,0 +1,196 @@ +from __future__ import absolute_import + +import logging +import sys + +from collections import defaultdict + +from celery import beat +from celery import platforms +from celery.bin import beat as beat_bin +from celery.apps import beat as beatapp + +from celery.tests.case import AppCase, Mock, patch, restore_logging +from kombu.tests.case import redirect_stdouts + + +class MockedShelveModule(object): + shelves = defaultdict(lambda: {}) + + def open(self, filename, *args, **kwargs): + return self.shelves[filename] +mocked_shelve = MockedShelveModule() + + +class MockService(beat.Service): + started = False + in_sync = False + persistence = mocked_shelve + + def start(self): + self.__class__.started = True + + def sync(self): + self.__class__.in_sync = True + + +class MockBeat(beatapp.Beat): + running = False + + def run(self): + MockBeat.running = True + + +class MockBeat2(beatapp.Beat): + Service = MockService + + def install_sync_handler(self, b): + pass + + +class MockBeat3(beatapp.Beat): + Service = MockService + + def install_sync_handler(self, b): + raise TypeError('xxx') + + +class test_Beat(AppCase): + + def 
test_loglevel_string(self): + b = beatapp.Beat(app=self.app, loglevel='DEBUG', + redirect_stdouts=False) + self.assertEqual(b.loglevel, logging.DEBUG) + + b2 = beatapp.Beat(app=self.app, loglevel=logging.DEBUG, + redirect_stdouts=False) + self.assertEqual(b2.loglevel, logging.DEBUG) + + def test_colorize(self): + self.app.log.setup = Mock() + b = beatapp.Beat(app=self.app, no_color=True, + redirect_stdouts=False) + b.setup_logging() + self.assertTrue(self.app.log.setup.called) + self.assertEqual(self.app.log.setup.call_args[1]['colorize'], False) + + def test_init_loader(self): + b = beatapp.Beat(app=self.app, redirect_stdouts=False) + b.init_loader() + + def test_process_title(self): + b = beatapp.Beat(app=self.app, redirect_stdouts=False) + b.set_process_title() + + def test_run(self): + b = MockBeat2(app=self.app, redirect_stdouts=False) + MockService.started = False + b.run() + self.assertTrue(MockService.started) + + def psig(self, fun, *args, **kwargs): + handlers = {} + + class Signals(platforms.Signals): + + def __setitem__(self, sig, handler): + handlers[sig] = handler + + p, platforms.signals = platforms.signals, Signals() + try: + fun(*args, **kwargs) + return handlers + finally: + platforms.signals = p + + def test_install_sync_handler(self): + b = beatapp.Beat(app=self.app, redirect_stdouts=False) + clock = MockService(app=self.app) + MockService.in_sync = False + handlers = self.psig(b.install_sync_handler, clock) + with self.assertRaises(SystemExit): + handlers['SIGINT']('SIGINT', object()) + self.assertTrue(MockService.in_sync) + MockService.in_sync = False + + def test_setup_logging(self): + with restore_logging(): + try: + # py3k + delattr(sys.stdout, 'logger') + except AttributeError: + pass + b = beatapp.Beat(app=self.app, redirect_stdouts=False) + b.redirect_stdouts = False + b.app.log.already_setup = False + b.setup_logging() + with self.assertRaises(AttributeError): + sys.stdout.logger + + @redirect_stdouts + @patch('celery.apps.beat.logger') + def test_logs_errors(self, logger, stdout, stderr): + with restore_logging(): + b = MockBeat3( + app=self.app, redirect_stdouts=False, socket_timeout=None, + ) + b.start_scheduler() + self.assertTrue(logger.critical.called) + + @redirect_stdouts + @patch('celery.platforms.create_pidlock') + def test_use_pidfile(self, create_pidlock, stdout, stderr): + b = MockBeat2(app=self.app, pidfile='pidfilelockfilepid', + socket_timeout=None, redirect_stdouts=False) + b.start_scheduler() + self.assertTrue(create_pidlock.called) + + +class MockDaemonContext(object): + opened = False + closed = False + + def __init__(self, *args, **kwargs): + pass + + def open(self): + self.__class__.opened = True + return self + __enter__ = open + + def close(self, *args): + self.__class__.closed = True + __exit__ = close + + +class test_div(AppCase): + + def setup(self): + self.prev, beatapp.Beat = beatapp.Beat, MockBeat + self.ctx, beat_bin.detached = ( + beat_bin.detached, MockDaemonContext, + ) + + def teardown(self): + beatapp.Beat = self.prev + + def test_main(self): + sys.argv = [sys.argv[0], '-s', 'foo'] + try: + beat_bin.main(app=self.app) + self.assertTrue(MockBeat.running) + finally: + MockBeat.running = False + + def test_detach(self): + cmd = beat_bin.beat() + cmd.app = self.app + cmd.run(detach=True) + self.assertTrue(MockDaemonContext.opened) + self.assertTrue(MockDaemonContext.closed) + + def test_parse_options(self): + cmd = beat_bin.beat() + cmd.app = self.app + options, args = cmd.parse_options('celery beat', ['-s', 'foo']) + 
self.assertEqual(options.schedule, 'foo') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celery.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celery.py new file mode 100644 index 0000000..fbfdb62 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celery.py @@ -0,0 +1,588 @@ +from __future__ import absolute_import + +import sys + +from anyjson import dumps +from datetime import datetime + +from celery import __main__ +from celery.platforms import EX_FAILURE, EX_USAGE, EX_OK +from celery.bin.base import Error +from celery.bin.celery import ( + Command, + list_, + call, + purge, + result, + inspect, + control, + status, + migrate, + help, + report, + CeleryCommand, + determine_exit_status, + multi, + main as mainfun, + _RemoteControl, + command, +) + +from celery.tests.case import ( + AppCase, Mock, WhateverIO, override_stdouts, patch, +) + + +class test__main__(AppCase): + + def test_warn_deprecated(self): + with override_stdouts() as (stdout, _): + __main__._warn_deprecated('YADDA YADDA') + self.assertIn('command is deprecated', stdout.getvalue()) + self.assertIn('YADDA YADDA', stdout.getvalue()) + + def test_main(self): + with patch('celery.__main__.maybe_patch_concurrency') as mpc: + with patch('celery.bin.celery.main') as main: + __main__.main() + mpc.assert_called_with() + main.assert_called_with() + + def test_compat_worker(self): + with patch('celery.__main__.maybe_patch_concurrency') as mpc: + with patch('celery.__main__._warn_deprecated') as depr: + with patch('celery.bin.worker.main') as main: + __main__._compat_worker() + mpc.assert_called_with() + depr.assert_called_with('celery worker') + main.assert_called_with() + + def test_compat_multi(self): + with patch('celery.__main__.maybe_patch_concurrency') as mpc: + with patch('celery.__main__._warn_deprecated') as depr: + with patch('celery.bin.multi.main') as main: + __main__._compat_multi() + self.assertFalse(mpc.called) + depr.assert_called_with('celery multi') + main.assert_called_with() + + def test_compat_beat(self): + with patch('celery.__main__.maybe_patch_concurrency') as mpc: + with patch('celery.__main__._warn_deprecated') as depr: + with patch('celery.bin.beat.main') as main: + __main__._compat_beat() + mpc.assert_called_with() + depr.assert_called_with('celery beat') + main.assert_called_with() + + +class test_Command(AppCase): + + def test_Error_repr(self): + x = Error('something happened') + self.assertIsNotNone(x.status) + self.assertTrue(x.reason) + self.assertTrue(str(x)) + + def setup(self): + self.out = WhateverIO() + self.err = WhateverIO() + self.cmd = Command(self.app, stdout=self.out, stderr=self.err) + + def test_error(self): + self.cmd.out = Mock() + self.cmd.error('FOO') + self.assertTrue(self.cmd.out.called) + + def test_out(self): + f = Mock() + self.cmd.out('foo', f) + + def test_call(self): + + def ok_run(): + pass + + self.cmd.run = ok_run + self.assertEqual(self.cmd(), EX_OK) + + def error_run(): + raise Error('error', EX_FAILURE) + self.cmd.run = error_run + self.assertEqual(self.cmd(), EX_FAILURE) + + def test_run_from_argv(self): + with self.assertRaises(NotImplementedError): + self.cmd.run_from_argv('prog', ['foo', 'bar']) + + def test_pretty_list(self): + self.assertEqual(self.cmd.pretty([])[1], '- empty -') + self.assertIn('bar', self.cmd.pretty(['foo', 'bar'])[1]) + + def test_pretty_dict(self): + self.assertIn( + 'OK', + str(self.cmd.pretty({'ok': 'the quick brown fox'})[0]), + ) + self.assertIn( + 'ERROR', + 
str(self.cmd.pretty({'error': 'the quick brown fox'})[0]), + ) + + def test_pretty(self): + self.assertIn('OK', str(self.cmd.pretty('the quick brown'))) + self.assertIn('OK', str(self.cmd.pretty(object()))) + self.assertIn('OK', str(self.cmd.pretty({'foo': 'bar'}))) + + +class test_list(AppCase): + + def test_list_bindings_no_support(self): + l = list_(app=self.app, stderr=WhateverIO()) + management = Mock() + management.get_bindings.side_effect = NotImplementedError() + with self.assertRaises(Error): + l.list_bindings(management) + + def test_run(self): + l = list_(app=self.app, stderr=WhateverIO()) + l.run('bindings') + + with self.assertRaises(Error): + l.run(None) + + with self.assertRaises(Error): + l.run('foo') + + +class test_call(AppCase): + + def setup(self): + + @self.app.task(shared=False) + def add(x, y): + return x + y + self.add = add + + @patch('celery.app.base.Celery.send_task') + def test_run(self, send_task): + a = call(app=self.app, stderr=WhateverIO(), stdout=WhateverIO()) + a.run(self.add.name) + self.assertTrue(send_task.called) + + a.run(self.add.name, + args=dumps([4, 4]), + kwargs=dumps({'x': 2, 'y': 2})) + self.assertEqual(send_task.call_args[1]['args'], [4, 4]) + self.assertEqual(send_task.call_args[1]['kwargs'], {'x': 2, 'y': 2}) + + a.run(self.add.name, expires=10, countdown=10) + self.assertEqual(send_task.call_args[1]['expires'], 10) + self.assertEqual(send_task.call_args[1]['countdown'], 10) + + now = datetime.now() + iso = now.isoformat() + a.run(self.add.name, expires=iso) + self.assertEqual(send_task.call_args[1]['expires'], now) + with self.assertRaises(ValueError): + a.run(self.add.name, expires='foobaribazibar') + + +class test_purge(AppCase): + + @patch('celery.app.control.Control.purge') + def test_run(self, purge_): + out = WhateverIO() + a = purge(app=self.app, stdout=out) + purge_.return_value = 0 + a.run(force=True) + self.assertIn('No messages purged', out.getvalue()) + + purge_.return_value = 100 + a.run(force=True) + self.assertIn('100 messages', out.getvalue()) + + +class test_result(AppCase): + + def setup(self): + + @self.app.task(shared=False) + def add(x, y): + return x + y + self.add = add + + def test_run(self): + with patch('celery.result.AsyncResult.get') as get: + out = WhateverIO() + r = result(app=self.app, stdout=out) + get.return_value = 'Jerry' + r.run('id') + self.assertIn('Jerry', out.getvalue()) + + get.return_value = 'Elaine' + r.run('id', task=self.add.name) + self.assertIn('Elaine', out.getvalue()) + + with patch('celery.result.AsyncResult.traceback') as tb: + r.run('id', task=self.add.name, traceback=True) + self.assertIn(str(tb), out.getvalue()) + + +class test_status(AppCase): + + @patch('celery.bin.celery.inspect') + def test_run(self, inspect_): + out, err = WhateverIO(), WhateverIO() + ins = inspect_.return_value = Mock() + ins.run.return_value = [] + s = status(self.app, stdout=out, stderr=err) + with self.assertRaises(Error): + s.run() + + ins.run.return_value = ['a', 'b', 'c'] + s.run() + self.assertIn('3 nodes online', out.getvalue()) + s.run(quiet=True) + + +class test_migrate(AppCase): + + @patch('celery.contrib.migrate.migrate_tasks') + def test_run(self, migrate_tasks): + out = WhateverIO() + m = migrate(app=self.app, stdout=out, stderr=WhateverIO()) + with self.assertRaises(TypeError): + m.run() + self.assertFalse(migrate_tasks.called) + + m.run('memory://foo', 'memory://bar') + self.assertTrue(migrate_tasks.called) + + state = Mock() + state.count = 10 + state.strtotal = 30 + m.on_migrate_task(state, 
{'task': 'tasks.add', 'id': 'ID'}, None) + self.assertIn('10/30', out.getvalue()) + + +class test_report(AppCase): + + def test_run(self): + out = WhateverIO() + r = report(app=self.app, stdout=out) + self.assertEqual(r.run(), EX_OK) + self.assertTrue(out.getvalue()) + + +class test_help(AppCase): + + def test_run(self): + out = WhateverIO() + h = help(app=self.app, stdout=out) + h.parser = Mock() + self.assertEqual(h.run(), EX_USAGE) + self.assertTrue(out.getvalue()) + self.assertTrue(h.usage('help')) + h.parser.print_help.assert_called_with() + + +class test_CeleryCommand(AppCase): + + def test_execute_from_commandline(self): + x = CeleryCommand(app=self.app) + x.handle_argv = Mock() + x.handle_argv.return_value = 1 + with self.assertRaises(SystemExit): + x.execute_from_commandline() + + x.handle_argv.return_value = True + with self.assertRaises(SystemExit): + x.execute_from_commandline() + + x.handle_argv.side_effect = KeyboardInterrupt() + with self.assertRaises(SystemExit): + x.execute_from_commandline() + + x.respects_app_option = True + with self.assertRaises(SystemExit): + x.execute_from_commandline(['celery', 'multi']) + self.assertFalse(x.respects_app_option) + x.respects_app_option = True + with self.assertRaises(SystemExit): + x.execute_from_commandline(['manage.py', 'celery', 'multi']) + self.assertFalse(x.respects_app_option) + + def test_with_pool_option(self): + x = CeleryCommand(app=self.app) + self.assertIsNone(x.with_pool_option(['celery', 'events'])) + self.assertTrue(x.with_pool_option(['celery', 'worker'])) + self.assertTrue(x.with_pool_option(['manage.py', 'celery', 'worker'])) + + def test_load_extensions_no_commands(self): + with patch('celery.bin.celery.Extensions') as Ext: + ext = Ext.return_value = Mock(name='Extension') + ext.load.return_value = None + x = CeleryCommand(app=self.app) + x.load_extension_commands() + + def test_determine_exit_status(self): + self.assertEqual(determine_exit_status('true'), EX_OK) + self.assertEqual(determine_exit_status(''), EX_FAILURE) + + def test_relocate_args_from_start(self): + x = CeleryCommand(app=self.app) + self.assertEqual(x._relocate_args_from_start(None), []) + self.assertEqual( + x._relocate_args_from_start( + ['-l', 'debug', 'worker', '-c', '3', '--foo'], + ), + ['worker', '-c', '3', '--foo', '-l', 'debug'], + ) + self.assertEqual( + x._relocate_args_from_start( + ['--pool=gevent', '-l', 'debug', 'worker', '--foo', '-c', '3'], + ), + ['worker', '--foo', '-c', '3', '--pool=gevent', '-l', 'debug'], + ) + self.assertEqual( + x._relocate_args_from_start(['foo', '--foo=1']), + ['foo', '--foo=1'], + ) + + def test_handle_argv(self): + x = CeleryCommand(app=self.app) + x.execute = Mock() + x.handle_argv('celery', []) + x.execute.assert_called_with('help', ['help']) + + x.handle_argv('celery', ['start', 'foo']) + x.execute.assert_called_with('start', ['start', 'foo']) + + def test_execute(self): + x = CeleryCommand(app=self.app) + Help = x.commands['help'] = Mock() + help = Help.return_value = Mock() + x.execute('fooox', ['a']) + help.run_from_argv.assert_called_with(x.prog_name, [], command='help') + help.reset() + x.execute('help', ['help']) + help.run_from_argv.assert_called_with(x.prog_name, [], command='help') + + Dummy = x.commands['dummy'] = Mock() + dummy = Dummy.return_value = Mock() + exc = dummy.run_from_argv.side_effect = Error( + 'foo', status='EX_FAILURE', + ) + x.on_error = Mock(name='on_error') + help.reset() + x.execute('dummy', ['dummy']) + x.on_error.assert_called_with(exc) + 
dummy.run_from_argv.assert_called_with( + x.prog_name, [], command='dummy', + ) + help.run_from_argv.assert_called_with( + x.prog_name, [], command='help', + ) + + exc = dummy.run_from_argv.side_effect = x.UsageError('foo') + x.on_usage_error = Mock() + x.execute('dummy', ['dummy']) + x.on_usage_error.assert_called_with(exc) + + def test_on_usage_error(self): + x = CeleryCommand(app=self.app) + x.error = Mock() + x.on_usage_error(x.UsageError('foo'), command=None) + self.assertTrue(x.error.called) + x.on_usage_error(x.UsageError('foo'), command='dummy') + + def test_prepare_prog_name(self): + x = CeleryCommand(app=self.app) + main = Mock(name='__main__') + main.__file__ = '/opt/foo.py' + with patch.dict(sys.modules, __main__=main): + self.assertEqual(x.prepare_prog_name('__main__.py'), '/opt/foo.py') + self.assertEqual(x.prepare_prog_name('celery'), 'celery') + + +class test_RemoteControl(AppCase): + + def test_call_interface(self): + with self.assertRaises(NotImplementedError): + _RemoteControl(app=self.app).call() + + +class test_inspect(AppCase): + + def test_usage(self): + self.assertTrue(inspect(app=self.app).usage('foo')) + + def test_command_info(self): + i = inspect(app=self.app) + self.assertTrue(i.get_command_info( + 'ping', help=True, color=i.colored.red, + )) + + def test_list_commands_color(self): + i = inspect(app=self.app) + self.assertTrue(i.list_commands( + help=True, color=i.colored.red, + )) + self.assertTrue(i.list_commands( + help=False, color=None, + )) + + def test_epilog(self): + self.assertTrue(inspect(app=self.app).epilog) + + def test_do_call_method_sql_transport_type(self): + self.app.connection = Mock() + conn = self.app.connection.return_value = Mock(name='Connection') + conn.transport.driver_type = 'sql' + i = inspect(app=self.app) + with self.assertRaises(i.Error): + i.do_call_method(['ping']) + + def test_say_directions(self): + i = inspect(self.app) + i.out = Mock() + i.quiet = True + i.say_chat('<-', 'hello out') + self.assertFalse(i.out.called) + + i.say_chat('->', 'hello in') + self.assertTrue(i.out.called) + + i.quiet = False + i.out.reset_mock() + i.say_chat('<-', 'hello out', 'body') + self.assertTrue(i.out.called) + + @patch('celery.app.control.Control.inspect') + def test_run(self, real): + out = WhateverIO() + i = inspect(app=self.app, stdout=out) + with self.assertRaises(Error): + i.run() + with self.assertRaises(Error): + i.run('help') + with self.assertRaises(Error): + i.run('xyzzybaz') + + i.run('ping') + self.assertTrue(real.called) + i.run('ping', destination='foo,bar') + self.assertEqual(real.call_args[1]['destination'], ['foo', 'bar']) + self.assertEqual(real.call_args[1]['timeout'], 0.2) + callback = real.call_args[1]['callback'] + + callback({'foo': {'ok': 'pong'}}) + self.assertIn('OK', out.getvalue()) + + instance = real.return_value = Mock() + instance.ping.return_value = None + with self.assertRaises(Error): + i.run('ping') + + out.seek(0) + out.truncate() + i.quiet = True + i.say_chat('<-', 'hello') + self.assertFalse(out.getvalue()) + + +class test_control(AppCase): + + def control(self, patch_call, *args, **kwargs): + kwargs.setdefault('app', Mock(name='app')) + c = control(*args, **kwargs) + if patch_call: + c.call = Mock(name='control.call') + return c + + def test_call(self): + i = self.control(False) + i.call('foo', 1, kw=2) + i.app.control.foo.assert_called_with(1, kw=2, reply=True) + + def test_pool_grow(self): + i = self.control(True) + i.pool_grow('pool_grow', n=2) + i.call.assert_called_with('pool_grow', 2) + + def 
test_pool_shrink(self): + i = self.control(True) + i.pool_shrink('pool_shrink', n=2) + i.call.assert_called_with('pool_shrink', 2) + + def test_autoscale(self): + i = self.control(True) + i.autoscale('autoscale', max=3, min=2) + i.call.assert_called_with('autoscale', 3, 2) + + def test_rate_limit(self): + i = self.control(True) + i.rate_limit('rate_limit', 'proj.add', '1/s') + i.call.assert_called_with('rate_limit', 'proj.add', '1/s') + + def test_time_limit(self): + i = self.control(True) + i.time_limit('time_limit', 'proj.add', 10, 30) + i.call.assert_called_with('time_limit', 'proj.add', 10, 30) + + def test_add_consumer(self): + i = self.control(True) + i.add_consumer( + 'add_consumer', 'queue', 'exchange', 'topic', 'rkey', + durable=True, + ) + i.call.assert_called_with( + 'add_consumer', 'queue', 'exchange', 'topic', 'rkey', + durable=True, + ) + + def test_cancel_consumer(self): + i = self.control(True) + i.cancel_consumer('cancel_consumer', 'queue') + i.call.assert_called_with('cancel_consumer', 'queue') + + +class test_multi(AppCase): + + def test_get_options(self): + self.assertTupleEqual(multi(app=self.app).get_options(), ()) + + def test_run_from_argv(self): + with patch('celery.bin.multi.MultiTool') as MultiTool: + m = MultiTool.return_value = Mock() + multi(self.app).run_from_argv('celery', ['arg'], command='multi') + m.execute_from_commandline.assert_called_with( + ['multi', 'arg'], 'celery', + ) + + +class test_main(AppCase): + + @patch('celery.bin.celery.CeleryCommand') + def test_main(self, Command): + cmd = Command.return_value = Mock() + mainfun() + cmd.execute_from_commandline.assert_called_with(None) + + @patch('celery.bin.celery.CeleryCommand') + def test_main_KeyboardInterrupt(self, Command): + cmd = Command.return_value = Mock() + cmd.execute_from_commandline.side_effect = KeyboardInterrupt() + mainfun() + cmd.execute_from_commandline.assert_called_with(None) + + +class test_compat(AppCase): + + def test_compat_command_decorator(self): + with patch('celery.bin.celery.CeleryCommand') as CC: + self.assertEqual(command(), CC.register_command) + fun = Mock(name='fun') + command(fun) + CC.register_command.assert_called_with(fun) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celeryd_detach.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celeryd_detach.py new file mode 100644 index 0000000..0fa3934 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celeryd_detach.py @@ -0,0 +1,106 @@ +from __future__ import absolute_import + +from celery.platforms import IS_WINDOWS +from celery.bin.celeryd_detach import ( + detach, + detached_celeryd, + main, +) + +from celery.tests.case import AppCase, Mock, override_stdouts, patch + + +if not IS_WINDOWS: + class test_detached(AppCase): + + @patch('celery.bin.celeryd_detach.detached') + @patch('os.execv') + @patch('celery.bin.celeryd_detach.logger') + @patch('celery.app.log.Logging.setup_logging_subsystem') + def test_execs(self, setup_logs, logger, execv, detached): + context = detached.return_value = Mock() + context.__enter__ = Mock() + context.__exit__ = Mock() + + detach('/bin/boo', ['a', 'b', 'c'], logfile='/var/log', + pidfile='/var/pid', hostname='foo@example.com') + detached.assert_called_with( + '/var/log', '/var/pid', None, None, None, None, False, + after_forkers=False, + ) + execv.assert_called_with('/bin/boo', ['/bin/boo', 'a', 'b', 'c']) + + execv.side_effect = Exception('foo') + r = detach( + '/bin/boo', ['a', 'b', 'c'], + logfile='/var/log', 
pidfile='/var/pid', + hostname='foo@example.com', app=self.app) + context.__enter__.assert_called_with() + self.assertTrue(logger.critical.called) + setup_logs.assert_called_with( + 'ERROR', '/var/log', hostname='foo@example.com') + self.assertEqual(r, 1) + + +class test_PartialOptionParser(AppCase): + + def test_parser(self): + x = detached_celeryd(self.app) + p = x.Parser('celeryd_detach') + options, values = p.parse_args(['--logfile=foo', '--fake', '--enable', + 'a', 'b', '-c1', '-d', '2']) + self.assertEqual(options.logfile, 'foo') + self.assertEqual(values, ['a', 'b']) + self.assertEqual(p.leftovers, ['--enable', '-c1', '-d', '2']) + + with override_stdouts(): + with self.assertRaises(SystemExit): + p.parse_args(['--logfile']) + p.get_option('--logfile').nargs = 2 + with self.assertRaises(SystemExit): + p.parse_args(['--logfile=a']) + with self.assertRaises(SystemExit): + p.parse_args(['--fake=abc']) + + assert p.get_option('--logfile').nargs == 2 + p.parse_args(['--logfile=a', 'b']) + p.get_option('--logfile').nargs = 1 + + +class test_Command(AppCase): + argv = ['--autoscale=10,2', '-c', '1', + '--logfile=/var/log', '-lDEBUG', + '--', '.disable_rate_limits=1'] + + def test_parse_options(self): + x = detached_celeryd(app=self.app) + o, v, l = x.parse_options('cd', self.argv) + self.assertEqual(o.logfile, '/var/log') + self.assertEqual(l, ['--autoscale=10,2', '-c', '1', + '-lDEBUG', '--logfile=/var/log', + '--pidfile=celeryd.pid']) + x.parse_options('cd', []) # no args + + @patch('sys.exit') + @patch('celery.bin.celeryd_detach.detach') + def test_execute_from_commandline(self, detach, exit): + x = detached_celeryd(app=self.app) + x.execute_from_commandline(self.argv) + self.assertTrue(exit.called) + detach.assert_called_with( + path=x.execv_path, uid=None, gid=None, + umask=None, fake=False, logfile='/var/log', pidfile='celeryd.pid', + working_directory=None, executable=None, hostname=None, + argv=x.execv_argv + [ + '-c', '1', '-lDEBUG', + '--logfile=/var/log', '--pidfile=celeryd.pid', + '--', '.disable_rate_limits=1' + ], + app=self.app, + ) + + @patch('celery.bin.celeryd_detach.detached_celeryd') + def test_main(self, command): + c = command.return_value = Mock() + main(self.app) + c.execute_from_commandline.assert_called_with() diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celeryevdump.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celeryevdump.py new file mode 100644 index 0000000..09cdc4d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_celeryevdump.py @@ -0,0 +1,68 @@ +from __future__ import absolute_import + +from time import time + +from celery.events.dumper import ( + humanize_type, + Dumper, + evdump, +) + +from celery.tests.case import AppCase, Mock, WhateverIO, patch + + +class test_Dumper(AppCase): + + def setup(self): + self.out = WhateverIO() + self.dumper = Dumper(out=self.out) + + def test_humanize_type(self): + self.assertEqual(humanize_type('worker-offline'), 'shutdown') + self.assertEqual(humanize_type('task-started'), 'task started') + + def test_format_task_event(self): + self.dumper.format_task_event( + 'worker@example.com', time(), 'task-started', 'tasks.add', {}) + self.assertTrue(self.out.getvalue()) + + def test_on_event(self): + event = { + 'hostname': 'worker@example.com', + 'timestamp': time(), + 'uuid': '1ef', + 'name': 'tasks.add', + 'args': '(2, 2)', + 'kwargs': '{}', + } + self.dumper.on_event(dict(event, type='task-received')) + self.assertTrue(self.out.getvalue()) + 
self.dumper.on_event(dict(event, type='task-revoked')) + self.dumper.on_event(dict(event, type='worker-online')) + + @patch('celery.events.EventReceiver.capture') + def test_evdump(self, capture): + capture.side_effect = KeyboardInterrupt() + evdump(app=self.app) + + def test_evdump_error_handler(self): + app = Mock(name='app') + with patch('celery.events.dumper.Dumper') as Dumper: + Dumper.return_value = Mock(name='dumper') + recv = app.events.Receiver.return_value = Mock() + + def se(*_a, **_k): + recv.capture.side_effect = SystemExit() + raise KeyError() + recv.capture.side_effect = se + + Conn = app.connection.return_value = Mock(name='conn') + conn = Conn.clone.return_value = Mock(name='cloned_conn') + conn.connection_errors = (KeyError, ) + conn.channel_errors = () + + evdump(app) + self.assertTrue(conn.ensure_connection.called) + errback = conn.ensure_connection.call_args[0][0] + errback(KeyError(), 1) + self.assertTrue(conn.as_uri.called) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_events.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_events.py new file mode 100644 index 0000000..a6e79f7 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_events.py @@ -0,0 +1,73 @@ +from __future__ import absolute_import + +from celery.bin import events + +from celery.tests.case import AppCase, SkipTest, patch, _old_patch + + +class MockCommand(object): + executed = [] + + def execute_from_commandline(self, **kwargs): + self.executed.append(True) + + +def proctitle(prog, info=None): + proctitle.last = (prog, info) +proctitle.last = () + + +class test_events(AppCase): + + def setup(self): + self.ev = events.events(app=self.app) + + @_old_patch('celery.events.dumper', 'evdump', + lambda **kw: 'me dumper, you?') + @_old_patch('celery.bin.events', 'set_process_title', proctitle) + def test_run_dump(self): + self.assertEqual(self.ev.run(dump=True), 'me dumper, you?') + self.assertIn('celery events:dump', proctitle.last[0]) + + def test_run_top(self): + try: + import curses # noqa + except ImportError: + raise SkipTest('curses monitor requires curses') + + @_old_patch('celery.events.cursesmon', 'evtop', + lambda **kw: 'me top, you?') + @_old_patch('celery.bin.events', 'set_process_title', proctitle) + def _inner(): + self.assertEqual(self.ev.run(), 'me top, you?') + self.assertIn('celery events:top', proctitle.last[0]) + return _inner() + + @_old_patch('celery.events.snapshot', 'evcam', + lambda *a, **k: (a, k)) + @_old_patch('celery.bin.events', 'set_process_title', proctitle) + def test_run_cam(self): + a, kw = self.ev.run(camera='foo.bar.baz', logfile='logfile') + self.assertEqual(a[0], 'foo.bar.baz') + self.assertEqual(kw['freq'], 1.0) + self.assertIsNone(kw['maxrate']) + self.assertEqual(kw['loglevel'], 'INFO') + self.assertEqual(kw['logfile'], 'logfile') + self.assertIn('celery events:cam', proctitle.last[0]) + + @patch('celery.events.snapshot.evcam') + @patch('celery.bin.events.detached') + def test_run_cam_detached(self, detached, evcam): + self.ev.prog_name = 'celery events' + self.ev.run_evcam('myapp.Camera', detach=True) + self.assertTrue(detached.called) + self.assertTrue(evcam.called) + + def test_get_options(self): + self.assertTrue(self.ev.get_options()) + + @_old_patch('celery.bin.events', 'events', MockCommand) + def test_main(self): + MockCommand.executed = [] + events.main() + self.assertTrue(MockCommand.executed) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_multi.py 
b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_multi.py new file mode 100644 index 0000000..ee77a45 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_multi.py @@ -0,0 +1,474 @@ +from __future__ import absolute_import + +import errno +import signal +import sys + +from celery.bin.multi import ( + main, + MultiTool, + findsig, + abbreviations, + parse_ns_range, + format_opt, + quote, + NamespacedOptionParser, + multi_args, + __doc__ as doc, +) + +from celery.tests.case import AppCase, Mock, WhateverIO, SkipTest, patch + + +class test_functions(AppCase): + + def test_findsig(self): + self.assertEqual(findsig(['a', 'b', 'c', '-1']), 1) + self.assertEqual(findsig(['--foo=1', '-9']), 9) + self.assertEqual(findsig(['-INT']), signal.SIGINT) + self.assertEqual(findsig([]), signal.SIGTERM) + self.assertEqual(findsig(['-s']), signal.SIGTERM) + self.assertEqual(findsig(['-log']), signal.SIGTERM) + + def test_abbreviations(self): + expander = abbreviations({'%s': 'START', + '%x': 'STOP'}) + self.assertEqual(expander('foo%s'), 'fooSTART') + self.assertEqual(expander('foo%x'), 'fooSTOP') + self.assertEqual(expander('foo%y'), 'foo%y') + self.assertIsNone(expander(None)) + + def test_parse_ns_range(self): + self.assertEqual(parse_ns_range('1-3', True), ['1', '2', '3']) + self.assertEqual(parse_ns_range('1-3', False), ['1-3']) + self.assertEqual(parse_ns_range( + '1-3,10,11,20', True), + ['1', '2', '3', '10', '11', '20'], + ) + + def test_format_opt(self): + self.assertEqual(format_opt('--foo', None), '--foo') + self.assertEqual(format_opt('-c', 1), '-c 1') + self.assertEqual(format_opt('--log', 'foo'), '--log=foo') + + def test_quote(self): + self.assertEqual(quote("the 'quick"), "'the '\\''quick'") + + +class test_NamespacedOptionParser(AppCase): + + def test_parse(self): + x = NamespacedOptionParser(['-c:1,3', '4']) + self.assertEqual(x.namespaces.get('1,3'), {'-c': '4'}) + x = NamespacedOptionParser(['-c:jerry,elaine', '5', + '--loglevel:kramer=DEBUG', + '--flag', + '--logfile=foo', '-Q', 'bar', 'a', 'b', + '--', '.disable_rate_limits=1']) + self.assertEqual(x.options, {'--logfile': 'foo', + '-Q': 'bar', + '--flag': None}) + self.assertEqual(x.values, ['a', 'b']) + self.assertEqual(x.namespaces.get('jerry,elaine'), {'-c': '5'}) + self.assertEqual(x.namespaces.get('kramer'), {'--loglevel': 'DEBUG'}) + self.assertEqual(x.passthrough, '-- .disable_rate_limits=1') + + +class test_multi_args(AppCase): + + @patch('socket.gethostname') + def test_parse(self, gethostname): + p = NamespacedOptionParser([ + '-c:jerry,elaine', '5', + '--loglevel:kramer=DEBUG', + '--flag', + '--logfile=foo', '-Q', 'bar', 'jerry', + 'elaine', 'kramer', + '--', '.disable_rate_limits=1', + ]) + it = multi_args(p, cmd='COMMAND', append='*AP*', + prefix='*P*', suffix='*S*') + names = list(it) + + def assert_line_in(name, args): + self.assertIn(name, [tup[0] for tup in names]) + argv = None + for item in names: + if item[0] == name: + argv = item[1] + self.assertTrue(argv) + for arg in args: + self.assertIn(arg, argv) + + assert_line_in( + '*P*jerry@*S*', + ['COMMAND', '-n *P*jerry@*S*', '-Q bar', + '-c 5', '--flag', '--logfile=foo', + '-- .disable_rate_limits=1', '*AP*'], + ) + assert_line_in( + '*P*elaine@*S*', + ['COMMAND', '-n *P*elaine@*S*', '-Q bar', + '-c 5', '--flag', '--logfile=foo', + '-- .disable_rate_limits=1', '*AP*'], + ) + assert_line_in( + '*P*kramer@*S*', + ['COMMAND', '--loglevel=DEBUG', '-n *P*kramer@*S*', + '-Q bar', '--flag', '--logfile=foo', + '-- .disable_rate_limits=1', 
'*AP*'], + ) + expand = names[0][2] + self.assertEqual(expand('%h'), '*P*jerry@*S*') + self.assertEqual(expand('%n'), 'jerry') + names2 = list(multi_args(p, cmd='COMMAND', append='', + prefix='*P*', suffix='*S*')) + self.assertEqual(names2[0][1][-1], '-- .disable_rate_limits=1') + + gethostname.return_value = 'example.com' + p2 = NamespacedOptionParser(['10', '-c:1', '5']) + names3 = list(multi_args(p2, cmd='COMMAND')) + self.assertEqual(len(names3), 10) + self.assertEqual( + names3[0][0:2], + ('celery1@example.com', + ['COMMAND', '-n celery1@example.com', '-c 5', '']), + ) + for i, worker in enumerate(names3[1:]): + self.assertEqual( + worker[0:2], + ('celery%s@example.com' % (i + 2), + ['COMMAND', '-n celery%s@example.com' % (i + 2), '']), + ) + + names4 = list(multi_args(p2, cmd='COMMAND', suffix='""')) + self.assertEqual(len(names4), 10) + self.assertEqual( + names4[0][0:2], + ('celery1@', + ['COMMAND', '-n celery1@', '-c 5', '']), + ) + + p3 = NamespacedOptionParser(['foo@', '-c:foo', '5']) + names5 = list(multi_args(p3, cmd='COMMAND', suffix='""')) + self.assertEqual( + names5[0][0:2], + ('foo@', + ['COMMAND', '-n foo@', '-c 5', '']), + ) + + +class test_MultiTool(AppCase): + + def setup(self): + self.fh = WhateverIO() + self.env = {} + self.t = MultiTool(env=self.env, fh=self.fh) + + def test_note(self): + self.t.note('hello world') + self.assertEqual(self.fh.getvalue(), 'hello world\n') + + def test_note_quiet(self): + self.t.quiet = True + self.t.note('hello world') + self.assertFalse(self.fh.getvalue()) + + def test_info(self): + self.t.verbose = True + self.t.info('hello info') + self.assertEqual(self.fh.getvalue(), 'hello info\n') + + def test_info_not_verbose(self): + self.t.verbose = False + self.t.info('hello info') + self.assertFalse(self.fh.getvalue()) + + def test_error(self): + self.t.carp = Mock() + self.t.usage = Mock() + self.assertEqual(self.t.error('foo'), 1) + self.t.carp.assert_called_with('foo') + self.t.usage.assert_called_with() + + self.t.carp = Mock() + self.assertEqual(self.t.error(), 1) + self.assertFalse(self.t.carp.called) + + self.assertEqual(self.t.retcode, 1) + + @patch('celery.bin.multi.Popen') + def test_waitexec(self, Popen): + self.t.note = Mock() + pipe = Popen.return_value = Mock() + pipe.wait.return_value = -10 + self.assertEqual(self.t.waitexec(['-m', 'foo'], 'path'), 10) + Popen.assert_called_with(['path', '-m', 'foo'], env=self.t.env) + self.t.note.assert_called_with('* Child was terminated by signal 10') + + pipe.wait.return_value = 2 + self.assertEqual(self.t.waitexec(['-m', 'foo'], 'path'), 2) + self.t.note.assert_called_with( + '* Child terminated with errorcode 2', + ) + + pipe.wait.return_value = 0 + self.assertFalse(self.t.waitexec(['-m', 'foo', 'path'])) + + def test_nosplash(self): + self.t.nosplash = True + self.t.splash() + self.assertFalse(self.fh.getvalue()) + + def test_splash(self): + self.t.nosplash = False + self.t.splash() + self.assertIn('celery multi', self.fh.getvalue()) + + def test_usage(self): + self.t.usage() + self.assertTrue(self.fh.getvalue()) + + def test_help(self): + self.t.help([]) + self.assertIn(doc, self.fh.getvalue()) + + def test_expand(self): + self.t.expand(['foo%n', 'ask', 'klask', 'dask']) + self.assertEqual( + self.fh.getvalue(), 'fooask\nfooklask\nfoodask\n', + ) + + def test_restart(self): + stop = self.t._stop_nodes = Mock() + self.t.restart(['jerry', 'george'], 'celery worker') + waitexec = self.t.waitexec = Mock() + self.assertTrue(stop.called) + callback = stop.call_args[1]['callback'] + 
self.assertTrue(callback) + + waitexec.return_value = 0 + callback('jerry', ['arg'], 13) + waitexec.assert_called_with(['arg'], path=sys.executable) + self.assertIn('OK', self.fh.getvalue()) + self.fh.seek(0) + self.fh.truncate() + + waitexec.return_value = 1 + callback('jerry', ['arg'], 13) + self.assertIn('FAILED', self.fh.getvalue()) + + def test_stop(self): + self.t.getpids = Mock() + self.t.getpids.return_value = [2, 3, 4] + self.t.shutdown_nodes = Mock() + self.t.stop(['a', 'b', '-INT'], 'celery worker') + self.t.shutdown_nodes.assert_called_with( + [2, 3, 4], sig=signal.SIGINT, retry=None, callback=None, + + ) + + def test_kill(self): + if not hasattr(signal, 'SIGKILL'): + raise SkipTest('SIGKILL not supported by this platform') + self.t.getpids = Mock() + self.t.getpids.return_value = [ + ('a', None, 10), + ('b', None, 11), + ('c', None, 12) + ] + sig = self.t.signal_node = Mock() + + self.t.kill(['a', 'b', 'c'], 'celery worker') + + sigs = sig.call_args_list + self.assertEqual(len(sigs), 3) + self.assertEqual(sigs[0][0], ('a', 10, signal.SIGKILL)) + self.assertEqual(sigs[1][0], ('b', 11, signal.SIGKILL)) + self.assertEqual(sigs[2][0], ('c', 12, signal.SIGKILL)) + + def prepare_pidfile_for_getpids(self, Pidfile): + class pids(object): + + def __init__(self, path): + self.path = path + + def read_pid(self): + try: + return {'foo.pid': 10, + 'bar.pid': 11}[self.path] + except KeyError: + raise ValueError() + Pidfile.side_effect = pids + + @patch('celery.bin.multi.Pidfile') + @patch('socket.gethostname') + def test_getpids(self, gethostname, Pidfile): + gethostname.return_value = 'e.com' + self.prepare_pidfile_for_getpids(Pidfile) + callback = Mock() + + p = NamespacedOptionParser(['foo', 'bar', 'baz']) + nodes = self.t.getpids(p, 'celery worker', callback=callback) + node_0, node_1 = nodes + self.assertEqual(node_0[0], 'foo@e.com') + self.assertEqual( + sorted(node_0[1]), + sorted(('celery worker', '--pidfile=foo.pid', + '-n foo@e.com', '')), + ) + self.assertEqual(node_0[2], 10) + + self.assertEqual(node_1[0], 'bar@e.com') + self.assertEqual( + sorted(node_1[1]), + sorted(('celery worker', '--pidfile=bar.pid', + '-n bar@e.com', '')), + ) + self.assertEqual(node_1[2], 11) + self.assertTrue(callback.called) + cargs, _ = callback.call_args + self.assertEqual(cargs[0], 'baz@e.com') + self.assertItemsEqual( + cargs[1], + ['celery worker', '--pidfile=baz.pid', '-n baz@e.com', ''], + ) + self.assertIsNone(cargs[2]) + self.assertIn('DOWN', self.fh.getvalue()) + + # without callback, should work + nodes = self.t.getpids(p, 'celery worker', callback=None) + + @patch('celery.bin.multi.Pidfile') + @patch('socket.gethostname') + @patch('celery.bin.multi.sleep') + def test_shutdown_nodes(self, slepp, gethostname, Pidfile): + gethostname.return_value = 'e.com' + self.prepare_pidfile_for_getpids(Pidfile) + self.assertIsNone(self.t.shutdown_nodes([])) + self.t.signal_node = Mock() + node_alive = self.t.node_alive = Mock() + self.t.node_alive.return_value = False + + callback = Mock() + self.t.stop(['foo', 'bar', 'baz'], 'celery worker', callback=callback) + sigs = sorted(self.t.signal_node.call_args_list) + self.assertEqual(len(sigs), 2) + self.assertIn( + ('foo@e.com', 10, signal.SIGTERM), + [tup[0] for tup in sigs], + ) + self.assertIn( + ('bar@e.com', 11, signal.SIGTERM), + [tup[0] for tup in sigs], + ) + self.t.signal_node.return_value = False + self.assertTrue(callback.called) + self.t.stop(['foo', 'bar', 'baz'], 'celery worker', callback=None) + + def on_node_alive(pid): + if 
node_alive.call_count > 4: + return True + return False + self.t.signal_node.return_value = True + self.t.node_alive.side_effect = on_node_alive + self.t.stop(['foo', 'bar', 'baz'], 'celery worker', retry=True) + + @patch('os.kill') + def test_node_alive(self, kill): + kill.return_value = True + self.assertTrue(self.t.node_alive(13)) + esrch = OSError() + esrch.errno = errno.ESRCH + kill.side_effect = esrch + self.assertFalse(self.t.node_alive(13)) + kill.assert_called_with(13, 0) + + enoent = OSError() + enoent.errno = errno.ENOENT + kill.side_effect = enoent + with self.assertRaises(OSError): + self.t.node_alive(13) + + @patch('os.kill') + def test_signal_node(self, kill): + kill.return_value = True + self.assertTrue(self.t.signal_node('foo', 13, 9)) + esrch = OSError() + esrch.errno = errno.ESRCH + kill.side_effect = esrch + self.assertFalse(self.t.signal_node('foo', 13, 9)) + kill.assert_called_with(13, 9) + self.assertIn('Could not signal foo', self.fh.getvalue()) + + enoent = OSError() + enoent.errno = errno.ENOENT + kill.side_effect = enoent + with self.assertRaises(OSError): + self.t.signal_node('foo', 13, 9) + + def test_start(self): + self.t.waitexec = Mock() + self.t.waitexec.return_value = 0 + self.assertFalse(self.t.start(['foo', 'bar', 'baz'], 'celery worker')) + + self.t.waitexec.return_value = 1 + self.assertFalse(self.t.start(['foo', 'bar', 'baz'], 'celery worker')) + + def test_show(self): + self.t.show(['foo', 'bar', 'baz'], 'celery worker') + self.assertTrue(self.fh.getvalue()) + + @patch('socket.gethostname') + def test_get(self, gethostname): + gethostname.return_value = 'e.com' + self.t.get(['xuzzy@e.com', 'foo', 'bar', 'baz'], 'celery worker') + self.assertFalse(self.fh.getvalue()) + self.t.get(['foo@e.com', 'foo', 'bar', 'baz'], 'celery worker') + self.assertTrue(self.fh.getvalue()) + + @patch('socket.gethostname') + def test_names(self, gethostname): + gethostname.return_value = 'e.com' + self.t.names(['foo', 'bar', 'baz'], 'celery worker') + self.assertIn('foo@e.com\nbar@e.com\nbaz@e.com', self.fh.getvalue()) + + def test_execute_from_commandline(self): + start = self.t.commands['start'] = Mock() + self.t.error = Mock() + self.t.execute_from_commandline(['multi', 'start', 'foo', 'bar']) + self.assertFalse(self.t.error.called) + start.assert_called_with(['foo', 'bar'], 'celery worker') + + self.t.error = Mock() + self.t.execute_from_commandline(['multi', 'frob', 'foo', 'bar']) + self.t.error.assert_called_with('Invalid command: frob') + + self.t.error = Mock() + self.t.execute_from_commandline(['multi']) + self.t.error.assert_called_with() + + self.t.error = Mock() + self.t.execute_from_commandline(['multi', '-foo']) + self.t.error.assert_called_with() + + self.t.execute_from_commandline( + ['multi', 'start', 'foo', + '--nosplash', '--quiet', '-q', '--verbose', '--no-color'], + ) + self.assertTrue(self.t.nosplash) + self.assertTrue(self.t.quiet) + self.assertTrue(self.t.verbose) + self.assertTrue(self.t.no_color) + + def test_stopwait(self): + self.t._stop_nodes = Mock() + self.t.stopwait(['foo', 'bar', 'baz'], 'celery worker') + self.assertEqual(self.t._stop_nodes.call_args[1]['retry'], 2) + + @patch('celery.bin.multi.MultiTool') + def test_main(self, MultiTool): + m = MultiTool.return_value = Mock() + with self.assertRaises(SystemExit): + main() + m.execute_from_commandline.assert_called_with(sys.argv) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_worker.py b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_worker.py new 
file mode 100644 index 0000000..bc63940 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/bin/test_worker.py @@ -0,0 +1,681 @@ +from __future__ import absolute_import + +import logging +import os +import sys + +from billiard import current_process +from kombu import Exchange, Queue + +from celery import platforms +from celery import signals +from celery.app import trace +from celery.apps import worker as cd +from celery.bin.worker import worker, main as worker_main +from celery.exceptions import ( + ImproperlyConfigured, WorkerShutdown, WorkerTerminate, +) +from celery.utils.log import ensure_process_aware_logger +from celery.worker import state + +from celery.tests.case import ( + AppCase, + Mock, + SkipTest, + disable_stdouts, + patch, + skip_if_pypy, + skip_if_jython, +) + +ensure_process_aware_logger() + + +class WorkerAppCase(AppCase): + + def tearDown(self): + super(WorkerAppCase, self).tearDown() + trace.reset_worker_optimizations() + + +class Worker(cd.Worker): + redirect_stdouts = False + + def start(self, *args, **kwargs): + self.on_start() + + +class test_Worker(WorkerAppCase): + Worker = Worker + + @disable_stdouts + def test_queues_string(self): + w = self.app.Worker() + w.setup_queues('foo,bar,baz') + self.assertTrue('foo' in self.app.amqp.queues) + + @disable_stdouts + def test_cpu_count(self): + with patch('celery.worker.cpu_count') as cpu_count: + cpu_count.side_effect = NotImplementedError() + w = self.app.Worker(concurrency=None) + self.assertEqual(w.concurrency, 2) + w = self.app.Worker(concurrency=5) + self.assertEqual(w.concurrency, 5) + + @disable_stdouts + def test_windows_B_option(self): + self.app.IS_WINDOWS = True + with self.assertRaises(SystemExit): + worker(app=self.app).run(beat=True) + + def test_setup_concurrency_very_early(self): + x = worker() + x.run = Mock() + with self.assertRaises(ImportError): + x.execute_from_commandline(['worker', '-P', 'xyzybox']) + + def test_run_from_argv_basic(self): + x = worker(app=self.app) + x.run = Mock() + x.maybe_detach = Mock() + + def run(*args, **kwargs): + pass + x.run = run + x.run_from_argv('celery', []) + self.assertTrue(x.maybe_detach.called) + + def test_maybe_detach(self): + x = worker(app=self.app) + with patch('celery.bin.worker.detached_celeryd') as detached: + x.maybe_detach([]) + self.assertFalse(detached.called) + with self.assertRaises(SystemExit): + x.maybe_detach(['--detach']) + self.assertTrue(detached.called) + + @disable_stdouts + def test_invalid_loglevel_gives_error(self): + x = worker(app=self.app) + with self.assertRaises(SystemExit): + x.run(loglevel='GRIM_REAPER') + + def test_no_loglevel(self): + self.app.Worker = Mock() + worker(app=self.app).run(loglevel=None) + + def test_tasklist(self): + worker = self.app.Worker() + self.assertTrue(worker.app.tasks) + self.assertTrue(worker.app.finalized) + self.assertTrue(worker.tasklist(include_builtins=True)) + worker.tasklist(include_builtins=False) + + def test_extra_info(self): + worker = self.app.Worker() + worker.loglevel = logging.WARNING + self.assertFalse(worker.extra_info()) + worker.loglevel = logging.INFO + self.assertTrue(worker.extra_info()) + + @disable_stdouts + def test_loglevel_string(self): + worker = self.Worker(app=self.app, loglevel='INFO') + self.assertEqual(worker.loglevel, logging.INFO) + + @disable_stdouts + def test_run_worker(self): + handlers = {} + + class Signals(platforms.Signals): + + def __setitem__(self, sig, handler): + handlers[sig] = handler + + p = platforms.signals + platforms.signals = 
Signals() + try: + w = self.Worker(app=self.app) + w._isatty = False + w.on_start() + for sig in 'SIGINT', 'SIGHUP', 'SIGTERM': + self.assertIn(sig, handlers) + + handlers.clear() + w = self.Worker(app=self.app) + w._isatty = True + w.on_start() + for sig in 'SIGINT', 'SIGTERM': + self.assertIn(sig, handlers) + self.assertNotIn('SIGHUP', handlers) + finally: + platforms.signals = p + + @disable_stdouts + def test_startup_info(self): + worker = self.Worker(app=self.app) + worker.on_start() + self.assertTrue(worker.startup_info()) + worker.loglevel = logging.DEBUG + self.assertTrue(worker.startup_info()) + worker.loglevel = logging.INFO + self.assertTrue(worker.startup_info()) + worker.autoscale = 13, 10 + self.assertTrue(worker.startup_info()) + + prev_loader = self.app.loader + worker = self.Worker(app=self.app, queues='foo,bar,baz,xuzzy,do,re,mi') + self.app.loader = Mock() + self.app.loader.__module__ = 'acme.baked_beans' + self.assertTrue(worker.startup_info()) + + self.app.loader = Mock() + self.app.loader.__module__ = 'celery.loaders.foo' + self.assertTrue(worker.startup_info()) + + from celery.loaders.app import AppLoader + self.app.loader = AppLoader(app=self.app) + self.assertTrue(worker.startup_info()) + + self.app.loader = prev_loader + worker.send_events = True + self.assertTrue(worker.startup_info()) + + # test when there are too few output lines + # to draft the ascii art onto + prev, cd.ARTLINES = cd.ARTLINES, ['the quick brown fox'] + try: + self.assertTrue(worker.startup_info()) + finally: + cd.ARTLINES = prev + + @disable_stdouts + def test_run(self): + self.Worker(app=self.app).on_start() + self.Worker(app=self.app, purge=True).on_start() + worker = self.Worker(app=self.app) + worker.on_start() + + @disable_stdouts + def test_purge_messages(self): + self.Worker(app=self.app).purge_messages() + + @disable_stdouts + def test_init_queues(self): + app = self.app + c = app.conf + app.amqp.queues = app.amqp.Queues({ + 'celery': {'exchange': 'celery', + 'routing_key': 'celery'}, + 'video': {'exchange': 'video', + 'routing_key': 'video'}, + }) + worker = self.Worker(app=self.app) + worker.setup_queues(['video']) + self.assertIn('video', app.amqp.queues) + self.assertIn('video', app.amqp.queues.consume_from) + self.assertIn('celery', app.amqp.queues) + self.assertNotIn('celery', app.amqp.queues.consume_from) + + c.CELERY_CREATE_MISSING_QUEUES = False + del(app.amqp.queues) + with self.assertRaises(ImproperlyConfigured): + self.Worker(app=self.app).setup_queues(['image']) + del(app.amqp.queues) + c.CELERY_CREATE_MISSING_QUEUES = True + worker = self.Worker(app=self.app) + worker.setup_queues(['image']) + self.assertIn('image', app.amqp.queues.consume_from) + self.assertEqual( + Queue('image', Exchange('image'), routing_key='image'), + app.amqp.queues['image'], + ) + + @disable_stdouts + def test_autoscale_argument(self): + worker1 = self.Worker(app=self.app, autoscale='10,3') + self.assertListEqual(worker1.autoscale, [10, 3]) + worker2 = self.Worker(app=self.app, autoscale='10') + self.assertListEqual(worker2.autoscale, [10, 0]) + self.assert_no_logging_side_effect() + + def test_include_argument(self): + worker1 = self.Worker(app=self.app, include='os') + self.assertListEqual(worker1.include, ['os']) + worker2 = self.Worker(app=self.app, + include='os,sys') + self.assertListEqual(worker2.include, ['os', 'sys']) + self.Worker(app=self.app, include=['os', 'sys']) + + @disable_stdouts + def test_unknown_loglevel(self): + with self.assertRaises(SystemExit): + 
worker(app=self.app).run(loglevel='ALIEN') + worker1 = self.Worker(app=self.app, loglevel=0xFFFF) + self.assertEqual(worker1.loglevel, 0xFFFF) + + @disable_stdouts + @patch('os._exit') + def test_warns_if_running_as_privileged_user(self, _exit): + app = self.app + if app.IS_WINDOWS: + raise SkipTest('Not applicable on Windows') + + with patch('os.getuid') as getuid: + getuid.return_value = 0 + self.app.conf.CELERY_ACCEPT_CONTENT = ['pickle'] + worker = self.Worker(app=self.app) + worker.on_start() + _exit.assert_called_with(1) + from celery import platforms + platforms.C_FORCE_ROOT = True + try: + with self.assertWarnsRegex( + RuntimeWarning, + r'absolutely not recommended'): + worker = self.Worker(app=self.app) + worker.on_start() + finally: + platforms.C_FORCE_ROOT = False + self.app.conf.CELERY_ACCEPT_CONTENT = ['json'] + with self.assertWarnsRegex( + RuntimeWarning, + r'absolutely not recommended'): + worker = self.Worker(app=self.app) + worker.on_start() + + @disable_stdouts + def test_redirect_stdouts(self): + self.Worker(app=self.app, redirect_stdouts=False) + with self.assertRaises(AttributeError): + sys.stdout.logger + + @disable_stdouts + def test_on_start_custom_logging(self): + self.app.log.redirect_stdouts = Mock() + worker = self.Worker(app=self.app, redirect_stoutds=True) + worker._custom_logging = True + worker.on_start() + self.assertFalse(self.app.log.redirect_stdouts.called) + + def test_setup_logging_no_color(self): + worker = self.Worker( + app=self.app, redirect_stdouts=False, no_color=True, + ) + prev, self.app.log.setup = self.app.log.setup, Mock() + try: + worker.setup_logging() + self.assertFalse(self.app.log.setup.call_args[1]['colorize']) + finally: + self.app.log.setup = prev + + @disable_stdouts + def test_startup_info_pool_is_str(self): + worker = self.Worker(app=self.app, redirect_stdouts=False) + worker.pool_cls = 'foo' + worker.startup_info() + + def test_redirect_stdouts_already_handled(self): + logging_setup = [False] + + @signals.setup_logging.connect + def on_logging_setup(**kwargs): + logging_setup[0] = True + + try: + worker = self.Worker(app=self.app, redirect_stdouts=False) + worker.app.log.already_setup = False + worker.setup_logging() + self.assertTrue(logging_setup[0]) + with self.assertRaises(AttributeError): + sys.stdout.logger + finally: + signals.setup_logging.disconnect(on_logging_setup) + + @disable_stdouts + def test_platform_tweaks_osx(self): + + class OSXWorker(Worker): + proxy_workaround_installed = False + + def osx_proxy_detection_workaround(self): + self.proxy_workaround_installed = True + + worker = OSXWorker(app=self.app, redirect_stdouts=False) + + def install_HUP_nosupport(controller): + controller.hup_not_supported_installed = True + + class Controller(object): + pass + + prev = cd.install_HUP_not_supported_handler + cd.install_HUP_not_supported_handler = install_HUP_nosupport + try: + worker.app.IS_OSX = True + controller = Controller() + worker.install_platform_tweaks(controller) + self.assertTrue(controller.hup_not_supported_installed) + self.assertTrue(worker.proxy_workaround_installed) + finally: + cd.install_HUP_not_supported_handler = prev + + @disable_stdouts + def test_general_platform_tweaks(self): + + restart_worker_handler_installed = [False] + + def install_worker_restart_handler(worker): + restart_worker_handler_installed[0] = True + + class Controller(object): + pass + + prev = cd.install_worker_restart_handler + cd.install_worker_restart_handler = install_worker_restart_handler + try: + worker = 
self.Worker(app=self.app) + worker.app.IS_OSX = False + worker.install_platform_tweaks(Controller()) + self.assertTrue(restart_worker_handler_installed[0]) + finally: + cd.install_worker_restart_handler = prev + + @disable_stdouts + def test_on_consumer_ready(self): + worker_ready_sent = [False] + + @signals.worker_ready.connect + def on_worker_ready(**kwargs): + worker_ready_sent[0] = True + + self.Worker(app=self.app).on_consumer_ready(object()) + self.assertTrue(worker_ready_sent[0]) + + +class test_funs(WorkerAppCase): + + def test_active_thread_count(self): + self.assertTrue(cd.active_thread_count()) + + @disable_stdouts + def test_set_process_status(self): + try: + __import__('setproctitle') + except ImportError: + raise SkipTest('setproctitle not installed') + worker = Worker(app=self.app, hostname='xyzza') + prev1, sys.argv = sys.argv, ['Arg0'] + try: + st = worker.set_process_status('Running') + self.assertIn('celeryd', st) + self.assertIn('xyzza', st) + self.assertIn('Running', st) + prev2, sys.argv = sys.argv, ['Arg0', 'Arg1'] + try: + st = worker.set_process_status('Running') + self.assertIn('celeryd', st) + self.assertIn('xyzza', st) + self.assertIn('Running', st) + self.assertIn('Arg1', st) + finally: + sys.argv = prev2 + finally: + sys.argv = prev1 + + @disable_stdouts + def test_parse_options(self): + cmd = worker() + cmd.app = self.app + opts, args = cmd.parse_options('worker', ['--concurrency=512', + '--heartbeat-interval=10']) + self.assertEqual(opts.concurrency, 512) + self.assertEqual(opts.heartbeat_interval, 10) + + @disable_stdouts + def test_main(self): + p, cd.Worker = cd.Worker, Worker + s, sys.argv = sys.argv, ['worker', '--discard'] + try: + worker_main(app=self.app) + finally: + cd.Worker = p + sys.argv = s + + +class test_signal_handlers(WorkerAppCase): + + class _Worker(object): + stopped = False + terminated = False + + def stop(self, in_sighandler=False): + self.stopped = True + + def terminate(self, in_sighandler=False): + self.terminated = True + + def psig(self, fun, *args, **kwargs): + handlers = {} + + class Signals(platforms.Signals): + def __setitem__(self, sig, handler): + handlers[sig] = handler + + p, platforms.signals = platforms.signals, Signals() + try: + fun(*args, **kwargs) + return handlers + finally: + platforms.signals = p + + @disable_stdouts + def test_worker_int_handler(self): + worker = self._Worker() + handlers = self.psig(cd.install_worker_int_handler, worker) + next_handlers = {} + state.should_stop = False + state.should_terminate = False + + class Signals(platforms.Signals): + + def __setitem__(self, sig, handler): + next_handlers[sig] = handler + + with patch('celery.apps.worker.active_thread_count') as c: + c.return_value = 3 + p, platforms.signals = platforms.signals, Signals() + try: + handlers['SIGINT']('SIGINT', object()) + self.assertTrue(state.should_stop) + finally: + platforms.signals = p + state.should_stop = False + + try: + next_handlers['SIGINT']('SIGINT', object()) + self.assertTrue(state.should_terminate) + finally: + state.should_terminate = False + + with patch('celery.apps.worker.active_thread_count') as c: + c.return_value = 1 + p, platforms.signals = platforms.signals, Signals() + try: + with self.assertRaises(WorkerShutdown): + handlers['SIGINT']('SIGINT', object()) + finally: + platforms.signals = p + + with self.assertRaises(WorkerTerminate): + next_handlers['SIGINT']('SIGINT', object()) + + @disable_stdouts + def test_worker_int_handler_only_stop_MainProcess(self): + try: + import _multiprocessing # noqa + 
except ImportError: + raise SkipTest('only relevant for multiprocessing') + process = current_process() + name, process.name = process.name, 'OtherProcess' + with patch('celery.apps.worker.active_thread_count') as c: + c.return_value = 3 + try: + worker = self._Worker() + handlers = self.psig(cd.install_worker_int_handler, worker) + handlers['SIGINT']('SIGINT', object()) + self.assertTrue(state.should_stop) + finally: + process.name = name + state.should_stop = False + + with patch('celery.apps.worker.active_thread_count') as c: + c.return_value = 1 + try: + worker = self._Worker() + handlers = self.psig(cd.install_worker_int_handler, worker) + with self.assertRaises(WorkerShutdown): + handlers['SIGINT']('SIGINT', object()) + finally: + process.name = name + state.should_stop = False + + @disable_stdouts + def test_install_HUP_not_supported_handler(self): + worker = self._Worker() + handlers = self.psig(cd.install_HUP_not_supported_handler, worker) + handlers['SIGHUP']('SIGHUP', object()) + + @disable_stdouts + def test_worker_term_hard_handler_only_stop_MainProcess(self): + try: + import _multiprocessing # noqa + except ImportError: + raise SkipTest('only relevant for multiprocessing') + process = current_process() + name, process.name = process.name, 'OtherProcess' + try: + with patch('celery.apps.worker.active_thread_count') as c: + c.return_value = 3 + worker = self._Worker() + handlers = self.psig( + cd.install_worker_term_hard_handler, worker) + try: + handlers['SIGQUIT']('SIGQUIT', object()) + self.assertTrue(state.should_terminate) + finally: + state.should_terminate = False + with patch('celery.apps.worker.active_thread_count') as c: + c.return_value = 1 + worker = self._Worker() + handlers = self.psig( + cd.install_worker_term_hard_handler, worker) + with self.assertRaises(WorkerTerminate): + handlers['SIGQUIT']('SIGQUIT', object()) + finally: + process.name = name + + @disable_stdouts + def test_worker_term_handler_when_threads(self): + with patch('celery.apps.worker.active_thread_count') as c: + c.return_value = 3 + worker = self._Worker() + handlers = self.psig(cd.install_worker_term_handler, worker) + try: + handlers['SIGTERM']('SIGTERM', object()) + self.assertTrue(state.should_stop) + finally: + state.should_stop = False + + @disable_stdouts + def test_worker_term_handler_when_single_thread(self): + with patch('celery.apps.worker.active_thread_count') as c: + c.return_value = 1 + worker = self._Worker() + handlers = self.psig(cd.install_worker_term_handler, worker) + try: + with self.assertRaises(WorkerShutdown): + handlers['SIGTERM']('SIGTERM', object()) + finally: + state.should_stop = False + + @patch('sys.__stderr__') + @skip_if_pypy + @skip_if_jython + def test_worker_cry_handler(self, stderr): + handlers = self.psig(cd.install_cry_handler) + self.assertIsNone(handlers['SIGUSR1']('SIGUSR1', object())) + self.assertTrue(stderr.write.called) + + @disable_stdouts + def test_worker_term_handler_only_stop_MainProcess(self): + try: + import _multiprocessing # noqa + except ImportError: + raise SkipTest('only relevant for multiprocessing') + process = current_process() + name, process.name = process.name, 'OtherProcess' + try: + with patch('celery.apps.worker.active_thread_count') as c: + c.return_value = 3 + worker = self._Worker() + handlers = self.psig(cd.install_worker_term_handler, worker) + handlers['SIGTERM']('SIGTERM', object()) + self.assertTrue(state.should_stop) + with patch('celery.apps.worker.active_thread_count') as c: + c.return_value = 1 + worker = 
self._Worker() + handlers = self.psig(cd.install_worker_term_handler, worker) + with self.assertRaises(WorkerShutdown): + handlers['SIGTERM']('SIGTERM', object()) + finally: + process.name = name + state.should_stop = False + + @disable_stdouts + @patch('celery.platforms.close_open_fds') + @patch('atexit.register') + @patch('os.close') + def test_worker_restart_handler(self, _close, register, close_open): + if getattr(os, 'execv', None) is None: + raise SkipTest('platform does not have excv') + argv = [] + + def _execv(*args): + argv.extend(args) + + execv, os.execv = os.execv, _execv + try: + worker = self._Worker() + handlers = self.psig(cd.install_worker_restart_handler, worker) + handlers['SIGHUP']('SIGHUP', object()) + self.assertTrue(state.should_stop) + self.assertTrue(register.called) + callback = register.call_args[0][0] + callback() + self.assertTrue(argv) + finally: + os.execv = execv + state.should_stop = False + + @disable_stdouts + def test_worker_term_hard_handler_when_threaded(self): + with patch('celery.apps.worker.active_thread_count') as c: + c.return_value = 3 + worker = self._Worker() + handlers = self.psig(cd.install_worker_term_hard_handler, worker) + try: + handlers['SIGQUIT']('SIGQUIT', object()) + self.assertTrue(state.should_terminate) + finally: + state.should_terminate = False + + @disable_stdouts + def test_worker_term_hard_handler_when_single_threaded(self): + with patch('celery.apps.worker.active_thread_count') as c: + c.return_value = 1 + worker = self._Worker() + handlers = self.psig(cd.install_worker_term_hard_handler, worker) + with self.assertRaises(WorkerTerminate): + handlers['SIGQUIT']('SIGQUIT', object()) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/case.py b/thesisenv/lib/python3.6/site-packages/celery/tests/case.py new file mode 100644 index 0000000..a9e65cd --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/case.py @@ -0,0 +1,880 @@ +from __future__ import absolute_import + +try: + import unittest # noqa + unittest.skip + from unittest.util import safe_repr, unorderable_list_difference +except AttributeError: + import unittest2 as unittest # noqa + from unittest2.util import safe_repr, unorderable_list_difference # noqa + +import importlib +import inspect +import logging +import numbers +import os +import platform +import re +import sys +import threading +import time +import types +import warnings + +from contextlib import contextmanager +from copy import deepcopy +from datetime import datetime, timedelta +from functools import partial, wraps +from types import ModuleType + +try: + from unittest import mock +except ImportError: + import mock # noqa +from nose import SkipTest +from kombu import Queue +from kombu.log import NullHandler +from kombu.utils import nested, symbol_by_name + +from celery import Celery +from celery.app import current_app +from celery.backends.cache import CacheBackend, DummyClient +from celery.exceptions import CDeprecationWarning, CPendingDeprecationWarning +from celery.five import ( + WhateverIO, builtins, items, reraise, + string_t, values, open_fqdn, +) +from celery.utils.functional import noop +from celery.utils.imports import qualname + +__all__ = [ + 'Case', 'AppCase', 'Mock', 'MagicMock', 'ANY', + 'patch', 'call', 'sentinel', 'skip_unless_module', + 'wrap_logger', 'with_environ', 'sleepdeprived', + 'skip_if_environ', 'todo', 'skip', 'skip_if', + 'skip_unless', 'mask_modules', 'override_stdouts', 'mock_module', + 'replace_module_value', 'sys_platform', 'reset_modules', + 
'patch_modules', 'mock_context', 'mock_open', 'patch_many', + 'assert_signal_called', 'skip_if_pypy', + 'skip_if_jython', 'body_from_sig', 'restore_logging', +] +patch = mock.patch +call = mock.call +sentinel = mock.sentinel +MagicMock = mock.MagicMock +ANY = mock.ANY + +PY3 = sys.version_info[0] == 3 + +CASE_REDEFINES_SETUP = """\ +{name} (subclass of AppCase) redefines private "setUp", should be: "setup"\ +""" +CASE_REDEFINES_TEARDOWN = """\ +{name} (subclass of AppCase) redefines private "tearDown", \ +should be: "teardown"\ +""" +CASE_LOG_REDIRECT_EFFECT = """\ +Test {0} did not disable LoggingProxy for {1}\ +""" +CASE_LOG_LEVEL_EFFECT = """\ +Test {0} Modified the level of the root logger\ +""" +CASE_LOG_HANDLER_EFFECT = """\ +Test {0} Modified handlers for the root logger\ +""" + +CELERY_TEST_CONFIG = { + #: Don't want log output when running suite. + 'CELERYD_HIJACK_ROOT_LOGGER': False, + 'CELERY_SEND_TASK_ERROR_EMAILS': False, + 'CELERY_DEFAULT_QUEUE': 'testcelery', + 'CELERY_DEFAULT_EXCHANGE': 'testcelery', + 'CELERY_DEFAULT_ROUTING_KEY': 'testcelery', + 'CELERY_QUEUES': ( + Queue('testcelery', routing_key='testcelery'), + ), + 'CELERY_ENABLE_UTC': True, + 'CELERY_TIMEZONE': 'UTC', + 'CELERYD_LOG_COLOR': False, + + # Mongo results tests (only executed if installed and running) + 'CELERY_MONGODB_BACKEND_SETTINGS': { + 'host': os.environ.get('MONGO_HOST') or 'localhost', + 'port': os.environ.get('MONGO_PORT') or 27017, + 'database': os.environ.get('MONGO_DB') or 'celery_unittests', + 'taskmeta_collection': (os.environ.get('MONGO_TASKMETA_COLLECTION') or + 'taskmeta_collection'), + 'user': os.environ.get('MONGO_USER'), + 'password': os.environ.get('MONGO_PASSWORD'), + } +} + + +class Trap(object): + + def __getattr__(self, name): + raise RuntimeError('Test depends on current_app') + + +class UnitLogging(symbol_by_name(Celery.log_cls)): + + def __init__(self, *args, **kwargs): + super(UnitLogging, self).__init__(*args, **kwargs) + self.already_setup = True + + +def UnitApp(name=None, broker=None, backend=None, + set_as_current=False, log=UnitLogging, **kwargs): + + app = Celery(name or 'celery.tests', + broker=broker or 'memory://', + backend=backend or 'cache+memory://', + set_as_current=set_as_current, + log=log, + **kwargs) + app.add_defaults(deepcopy(CELERY_TEST_CONFIG)) + return app + + +class Mock(mock.Mock): + + def __init__(self, *args, **kwargs): + attrs = kwargs.pop('attrs', None) or {} + super(Mock, self).__init__(*args, **kwargs) + for attr_name, attr_value in items(attrs): + setattr(self, attr_name, attr_value) + + +class _ContextMock(Mock): + """Dummy class implementing __enter__ and __exit__ + as the with statement requires these to be implemented + in the class, not just the instance.""" + + def __enter__(self): + pass + + def __exit__(self, *exc_info): + pass + + +def ContextMock(*args, **kwargs): + obj = _ContextMock(*args, **kwargs) + obj.attach_mock(_ContextMock(), '__enter__') + obj.attach_mock(_ContextMock(), '__exit__') + obj.__enter__.return_value = obj + # if __exit__ return a value the exception is ignored, + # so it must return None here. 
+ obj.__exit__.return_value = None + return obj + + +def _bind(f, o): + @wraps(f) + def bound_meth(*fargs, **fkwargs): + return f(o, *fargs, **fkwargs) + return bound_meth + + +if PY3: # pragma: no cover + def _get_class_fun(meth): + return meth +else: + def _get_class_fun(meth): + return meth.__func__ + + +class MockCallbacks(object): + + def __new__(cls, *args, **kwargs): + r = Mock(name=cls.__name__) + _get_class_fun(cls.__init__)(r, *args, **kwargs) + for key, value in items(vars(cls)): + if key not in ('__dict__', '__weakref__', '__new__', '__init__'): + if inspect.ismethod(value) or inspect.isfunction(value): + r.__getattr__(key).side_effect = _bind(value, r) + else: + r.__setattr__(key, value) + return r + + +def skip_unless_module(module): + + def _inner(fun): + + @wraps(fun) + def __inner(*args, **kwargs): + try: + importlib.import_module(module) + except ImportError: + raise SkipTest('Does not have %s' % (module, )) + + return fun(*args, **kwargs) + + return __inner + return _inner + + +# -- adds assertWarns from recent unittest2, not in Python 2.7. + +class _AssertRaisesBaseContext(object): + + def __init__(self, expected, test_case, callable_obj=None, + expected_regex=None): + self.expected = expected + self.failureException = test_case.failureException + self.obj_name = None + if isinstance(expected_regex, string_t): + expected_regex = re.compile(expected_regex) + self.expected_regex = expected_regex + + +def _is_magic_module(m): + # some libraries create custom module types that are lazily + # lodaded, e.g. Django installs some modules in sys.modules that + # will load _tkinter and other shit when touched. + + # pyflakes refuses to accept 'noqa' for this isinstance. + cls, modtype = type(m), types.ModuleType + try: + variables = vars(cls) + except TypeError: + return True + else: + return (cls is not modtype and ( + '__getattr__' in variables or + '__getattribute__' in variables)) + + +class _AssertWarnsContext(_AssertRaisesBaseContext): + """A context manager used to implement TestCase.assertWarns* methods.""" + + def __enter__(self): + # The __warningregistry__'s need to be in a pristine state for tests + # to work properly. + warnings.resetwarnings() + for v in list(values(sys.modules)): + # do not evaluate Django moved modules and other lazily + # initialized modules. 
+ if v and not _is_magic_module(v): + # use raw __getattribute__ to protect even better from + # lazily loaded modules + try: + object.__getattribute__(v, '__warningregistry__') + except AttributeError: + pass + else: + object.__setattr__(v, '__warningregistry__', {}) + self.warnings_manager = warnings.catch_warnings(record=True) + self.warnings = self.warnings_manager.__enter__() + warnings.simplefilter('always', self.expected) + return self + + def __exit__(self, exc_type, exc_value, tb): + self.warnings_manager.__exit__(exc_type, exc_value, tb) + if exc_type is not None: + # let unexpected exceptions pass through + return + try: + exc_name = self.expected.__name__ + except AttributeError: + exc_name = str(self.expected) + first_matching = None + for m in self.warnings: + w = m.message + if not isinstance(w, self.expected): + continue + if first_matching is None: + first_matching = w + if (self.expected_regex is not None and + not self.expected_regex.search(str(w))): + continue + # store warning for later retrieval + self.warning = w + self.filename = m.filename + self.lineno = m.lineno + return + # Now we simply try to choose a helpful failure message + if first_matching is not None: + raise self.failureException( + '%r does not match %r' % ( + self.expected_regex.pattern, str(first_matching))) + if self.obj_name: + raise self.failureException( + '%s not triggered by %s' % (exc_name, self.obj_name)) + else: + raise self.failureException('%s not triggered' % exc_name) + + +class Case(unittest.TestCase): + + def assertWarns(self, expected_warning): + return _AssertWarnsContext(expected_warning, self, None) + + def assertWarnsRegex(self, expected_warning, expected_regex): + return _AssertWarnsContext(expected_warning, self, + None, expected_regex) + + @contextmanager + def assertDeprecated(self): + with self.assertWarnsRegex(CDeprecationWarning, + r'scheduled for removal'): + yield + + @contextmanager + def assertPendingDeprecation(self): + with self.assertWarnsRegex(CPendingDeprecationWarning, + r'scheduled for deprecation'): + yield + + def assertDictContainsSubset(self, expected, actual, msg=None): + missing, mismatched = [], [] + + for key, value in items(expected): + if key not in actual: + missing.append(key) + elif value != actual[key]: + mismatched.append('%s, expected: %s, actual: %s' % ( + safe_repr(key), safe_repr(value), + safe_repr(actual[key]))) + + if not (missing or mismatched): + return + + standard_msg = '' + if missing: + standard_msg = 'Missing: %s' % ','.join(map(safe_repr, missing)) + + if mismatched: + if standard_msg: + standard_msg += '; ' + standard_msg += 'Mismatched values: %s' % ( + ','.join(mismatched)) + + self.fail(self._formatMessage(msg, standard_msg)) + + def assertItemsEqual(self, expected_seq, actual_seq, msg=None): + missing = unexpected = None + try: + expected = sorted(expected_seq) + actual = sorted(actual_seq) + except TypeError: + # Unsortable items (example: set(), complex(), ...) 
+ expected = list(expected_seq) + actual = list(actual_seq) + missing, unexpected = unorderable_list_difference( + expected, actual) + else: + return self.assertSequenceEqual(expected, actual, msg=msg) + + errors = [] + if missing: + errors.append( + 'Expected, but missing:\n %s' % (safe_repr(missing), ) + ) + if unexpected: + errors.append( + 'Unexpected, but present:\n %s' % (safe_repr(unexpected), ) + ) + if errors: + standardMsg = '\n'.join(errors) + self.fail(self._formatMessage(msg, standardMsg)) + + +def depends_on_current_app(fun): + if inspect.isclass(fun): + fun.contained = False + else: + @wraps(fun) + def __inner(self, *args, **kwargs): + self.app.set_current() + return fun(self, *args, **kwargs) + return __inner + + +class AppCase(Case): + contained = True + + def __init__(self, *args, **kwargs): + super(AppCase, self).__init__(*args, **kwargs) + if self.__class__.__dict__.get('setUp'): + raise RuntimeError( + CASE_REDEFINES_SETUP.format(name=qualname(self)), + ) + if self.__class__.__dict__.get('tearDown'): + raise RuntimeError( + CASE_REDEFINES_TEARDOWN.format(name=qualname(self)), + ) + + def Celery(self, *args, **kwargs): + return UnitApp(*args, **kwargs) + + def setUp(self): + self._threads_at_setup = list(threading.enumerate()) + from celery import _state + from celery import result + result.task_join_will_block = \ + _state.task_join_will_block = lambda: False + self._current_app = current_app() + self._default_app = _state.default_app + trap = Trap() + self._prev_tls = _state._tls + _state.set_default_app(trap) + + class NonTLS(object): + current_app = trap + _state._tls = NonTLS() + + self.app = self.Celery(set_as_current=False) + if not self.contained: + self.app.set_current() + root = logging.getLogger() + self.__rootlevel = root.level + self.__roothandlers = root.handlers + _state._set_task_join_will_block(False) + try: + self.setup() + except: + self._teardown_app() + raise + + def _teardown_app(self): + from celery.utils.log import LoggingProxy + assert sys.stdout + assert sys.stderr + assert sys.__stdout__ + assert sys.__stderr__ + this = self._get_test_name() + if isinstance(sys.stdout, (LoggingProxy, Mock)) or \ + isinstance(sys.__stdout__, (LoggingProxy, Mock)): + raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stdout')) + if isinstance(sys.stderr, (LoggingProxy, Mock)) or \ + isinstance(sys.__stderr__, (LoggingProxy, Mock)): + raise RuntimeError(CASE_LOG_REDIRECT_EFFECT.format(this, 'stderr')) + backend = self.app.__dict__.get('backend') + if backend is not None: + if isinstance(backend, CacheBackend): + if isinstance(backend.client, DummyClient): + backend.client.cache.clear() + backend._cache.clear() + from celery import _state + _state._set_task_join_will_block(False) + + _state.set_default_app(self._default_app) + _state._tls = self._prev_tls + _state._tls.current_app = self._current_app + if self.app is not self._current_app: + self.app.close() + self.app = None + self.assertEqual( + self._threads_at_setup, list(threading.enumerate()), + ) + + def _get_test_name(self): + return '.'.join([self.__class__.__name__, self._testMethodName]) + + def tearDown(self): + try: + self.teardown() + finally: + self._teardown_app() + self.assert_no_logging_side_effect() + + def assert_no_logging_side_effect(self): + this = self._get_test_name() + root = logging.getLogger() + if root.level != self.__rootlevel: + raise RuntimeError(CASE_LOG_LEVEL_EFFECT.format(this)) + if root.handlers != self.__roothandlers: + raise 
RuntimeError(CASE_LOG_HANDLER_EFFECT.format(this)) + + def setup(self): + pass + + def teardown(self): + pass + + +def get_handlers(logger): + return [h for h in logger.handlers if not isinstance(h, NullHandler)] + + +@contextmanager +def wrap_logger(logger, loglevel=logging.ERROR): + old_handlers = get_handlers(logger) + sio = WhateverIO() + siohandler = logging.StreamHandler(sio) + logger.handlers = [siohandler] + + try: + yield sio + finally: + logger.handlers = old_handlers + + +def with_environ(env_name, env_value): + + def _envpatched(fun): + + @wraps(fun) + def _patch_environ(*args, **kwargs): + prev_val = os.environ.get(env_name) + os.environ[env_name] = env_value + try: + return fun(*args, **kwargs) + finally: + os.environ[env_name] = prev_val or '' + + return _patch_environ + return _envpatched + + +def sleepdeprived(module=time): + + def _sleepdeprived(fun): + + @wraps(fun) + def __sleepdeprived(*args, **kwargs): + old_sleep = module.sleep + module.sleep = noop + try: + return fun(*args, **kwargs) + finally: + module.sleep = old_sleep + + return __sleepdeprived + + return _sleepdeprived + + +def skip_if_environ(env_var_name): + + def _wrap_test(fun): + + @wraps(fun) + def _skips_if_environ(*args, **kwargs): + if os.environ.get(env_var_name): + raise SkipTest('SKIP %s: %s set\n' % ( + fun.__name__, env_var_name)) + return fun(*args, **kwargs) + + return _skips_if_environ + + return _wrap_test + + +def _skip_test(reason, sign): + + def _wrap_test(fun): + + @wraps(fun) + def _skipped_test(*args, **kwargs): + raise SkipTest('%s: %s' % (sign, reason)) + + return _skipped_test + return _wrap_test + + +def todo(reason): + """TODO test decorator.""" + return _skip_test(reason, 'TODO') + + +def skip(reason): + """Skip test decorator.""" + return _skip_test(reason, 'SKIP') + + +def skip_if(predicate, reason): + """Skip test if predicate is :const:`True`.""" + + def _inner(fun): + return predicate and skip(reason)(fun) or fun + + return _inner + + +def skip_unless(predicate, reason): + """Skip test if predicate is :const:`False`.""" + return skip_if(not predicate, reason) + + +# Taken from +# http://bitbucket.org/runeh/snippets/src/tip/missing_modules.py +@contextmanager +def mask_modules(*modnames): + """Ban some modules from being importable inside the context + + For example: + + >>> with mask_modules('sys'): + ... try: + ... import sys + ... except ImportError: + ... 
print('sys not found') + sys not found + + >>> import sys # noqa + >>> sys.version + (2, 5, 2, 'final', 0) + + """ + + realimport = builtins.__import__ + + def myimp(name, *args, **kwargs): + if name in modnames: + raise ImportError('No module named %s' % name) + else: + return realimport(name, *args, **kwargs) + + builtins.__import__ = myimp + try: + yield True + finally: + builtins.__import__ = realimport + + +@contextmanager +def override_stdouts(): + """Override `sys.stdout` and `sys.stderr` with `WhateverIO`.""" + prev_out, prev_err = sys.stdout, sys.stderr + prev_rout, prev_rerr = sys.__stdout__, sys.__stderr__ + mystdout, mystderr = WhateverIO(), WhateverIO() + sys.stdout = sys.__stdout__ = mystdout + sys.stderr = sys.__stderr__ = mystderr + + try: + yield mystdout, mystderr + finally: + sys.stdout = prev_out + sys.stderr = prev_err + sys.__stdout__ = prev_rout + sys.__stderr__ = prev_rerr + + +def disable_stdouts(fun): + + @wraps(fun) + def disable(*args, **kwargs): + with override_stdouts(): + return fun(*args, **kwargs) + return disable + + +def _old_patch(module, name, mocked): + module = importlib.import_module(module) + + def _patch(fun): + + @wraps(fun) + def __patched(*args, **kwargs): + prev = getattr(module, name) + setattr(module, name, mocked) + try: + return fun(*args, **kwargs) + finally: + setattr(module, name, prev) + return __patched + return _patch + + +@contextmanager +def replace_module_value(module, name, value=None): + has_prev = hasattr(module, name) + prev = getattr(module, name, None) + if value: + setattr(module, name, value) + else: + try: + delattr(module, name) + except AttributeError: + pass + try: + yield + finally: + if prev is not None: + setattr(module, name, prev) + if not has_prev: + try: + delattr(module, name) + except AttributeError: + pass +pypy_version = partial( + replace_module_value, sys, 'pypy_version_info', +) +platform_pyimp = partial( + replace_module_value, platform, 'python_implementation', +) + + +@contextmanager +def sys_platform(value): + prev, sys.platform = sys.platform, value + try: + yield + finally: + sys.platform = prev + + +@contextmanager +def reset_modules(*modules): + prev = dict((k, sys.modules.pop(k)) for k in modules if k in sys.modules) + try: + yield + finally: + sys.modules.update(prev) + + +@contextmanager +def patch_modules(*modules): + prev = {} + for mod in modules: + prev[mod] = sys.modules.get(mod) + sys.modules[mod] = ModuleType(mod) + try: + yield + finally: + for name, mod in items(prev): + if mod is None: + sys.modules.pop(name, None) + else: + sys.modules[name] = mod + + +@contextmanager +def mock_module(*names): + prev = {} + + class MockModule(ModuleType): + + def __getattr__(self, attr): + setattr(self, attr, Mock()) + return ModuleType.__getattribute__(self, attr) + + mods = [] + for name in names: + try: + prev[name] = sys.modules[name] + except KeyError: + pass + mod = sys.modules[name] = MockModule(name) + mods.append(mod) + try: + yield mods + finally: + for name in names: + try: + sys.modules[name] = prev[name] + except KeyError: + try: + del(sys.modules[name]) + except KeyError: + pass + + +@contextmanager +def mock_context(mock, typ=Mock): + context = mock.return_value = Mock() + context.__enter__ = typ() + context.__exit__ = typ() + + def on_exit(*x): + if x[0]: + reraise(x[0], x[1], x[2]) + context.__exit__.side_effect = on_exit + context.__enter__.return_value = context + try: + yield context + finally: + context.reset() + + +@contextmanager +def mock_open(typ=WhateverIO, side_effect=None): 
+ with patch(open_fqdn) as open_: + with mock_context(open_) as context: + if side_effect is not None: + context.__enter__.side_effect = side_effect + val = context.__enter__.return_value = typ() + val.__exit__ = Mock() + yield val + + +def patch_many(*targets): + return nested(*[patch(target) for target in targets]) + + +@contextmanager +def assert_signal_called(signal, **expected): + handler = Mock() + call_handler = partial(handler) + signal.connect(call_handler) + try: + yield handler + finally: + signal.disconnect(call_handler) + handler.assert_called_with(signal=signal, **expected) + + +def skip_if_pypy(fun): + + @wraps(fun) + def _inner(*args, **kwargs): + if getattr(sys, 'pypy_version_info', None): + raise SkipTest('does not work on PyPy') + return fun(*args, **kwargs) + return _inner + + +def skip_if_jython(fun): + + @wraps(fun) + def _inner(*args, **kwargs): + if sys.platform.startswith('java'): + raise SkipTest('does not work on Jython') + return fun(*args, **kwargs) + return _inner + + +def body_from_sig(app, sig, utc=True): + sig.freeze() + callbacks = sig.options.pop('link', None) + errbacks = sig.options.pop('link_error', None) + countdown = sig.options.pop('countdown', None) + if countdown: + eta = app.now() + timedelta(seconds=countdown) + else: + eta = sig.options.pop('eta', None) + if eta and isinstance(eta, datetime): + eta = eta.isoformat() + expires = sig.options.pop('expires', None) + if expires and isinstance(expires, numbers.Real): + expires = app.now() + timedelta(seconds=expires) + if expires and isinstance(expires, datetime): + expires = expires.isoformat() + return { + 'task': sig.task, + 'id': sig.id, + 'args': sig.args, + 'kwargs': sig.kwargs, + 'callbacks': [dict(s) for s in callbacks] if callbacks else None, + 'errbacks': [dict(s) for s in errbacks] if errbacks else None, + 'eta': eta, + 'utc': utc, + 'expires': expires, + } + + +@contextmanager +def restore_logging(): + outs = sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__ + root = logging.getLogger() + level = root.level + handlers = root.handlers + + try: + yield + finally: + sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__ = outs + root.level = level + root.handlers[:] = handlers diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_compat.py b/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_compat.py new file mode 100644 index 0000000..02c7f7d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_compat.py @@ -0,0 +1,82 @@ +from __future__ import absolute_import + +from datetime import timedelta + +import sys +sys.modules.pop('celery.task', None) + +from celery.schedules import schedule # noqa +from celery.task import ( # noqa + periodic_task, + PeriodicTask +) +from celery.utils.timeutils import timedelta_seconds # noqa + +from celery.tests.case import AppCase, depends_on_current_app # noqa + + +class test_Task(AppCase): + + def test_base_task_inherits_magic_kwargs_from_app(self): + from celery.task import Task as OldTask + + class timkX(OldTask): + abstract = True + + with self.Celery(set_as_current=False, + accept_magic_kwargs=True) as app: + timkX.bind(app) + # see #918 + self.assertFalse(timkX.accept_magic_kwargs) + + from celery import Task as NewTask + + class timkY(NewTask): + 
abstract = True + + timkY.bind(app) + self.assertFalse(timkY.accept_magic_kwargs) + + +@depends_on_current_app +class test_periodic_tasks(AppCase): + + def setup(self): + @periodic_task(app=self.app, shared=False, + run_every=schedule(timedelta(hours=1), app=self.app)) + def my_periodic(): + pass + self.my_periodic = my_periodic + + def now(self): + return self.app.now() + + def test_must_have_run_every(self): + with self.assertRaises(NotImplementedError): + type('Foo', (PeriodicTask, ), {'__module__': __name__}) + + def test_remaining_estimate(self): + s = self.my_periodic.run_every + self.assertIsInstance( + s.remaining_estimate(s.maybe_make_aware(self.now())), + timedelta) + + def test_is_due_not_due(self): + due, remaining = self.my_periodic.run_every.is_due(self.now()) + self.assertFalse(due) + # This assertion may fail if executed in the + # first minute of an hour, thus 59 instead of 60 + self.assertGreater(remaining, 59) + + def test_is_due(self): + p = self.my_periodic + due, remaining = p.run_every.is_due( + self.now() - p.run_every.run_every, + ) + self.assertTrue(due) + self.assertEqual(remaining, + timedelta_seconds(p.run_every.run_every)) + + def test_schedule_repr(self): + p = self.my_periodic + self.assertTrue(repr(p.run_every)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_compat_utils.py b/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_compat_utils.py new file mode 100644 index 0000000..b041a0b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_compat_utils.py @@ -0,0 +1,50 @@ +from __future__ import absolute_import + +import celery + +from celery.app.task import Task as ModernTask +from celery.task.base import Task as CompatTask + +from celery.tests.case import AppCase, depends_on_current_app + + +@depends_on_current_app +class test_MagicModule(AppCase): + + def test_class_property_set_without_type(self): + self.assertTrue(ModernTask.__dict__['app'].__get__(CompatTask())) + + def test_class_property_set_on_class(self): + self.assertIs(ModernTask.__dict__['app'].__set__(None, None), + ModernTask.__dict__['app']) + + def test_class_property_set(self): + + class X(CompatTask): + pass + ModernTask.__dict__['app'].__set__(X(), self.app) + self.assertIs(X.app, self.app) + + def test_dir(self): + self.assertTrue(dir(celery.messaging)) + + def test_direct(self): + self.assertTrue(celery.task) + + def test_app_attrs(self): + self.assertEqual(celery.task.control.broadcast, + celery.current_app.control.broadcast) + + def test_decorators_task(self): + @celery.decorators.task + def _test_decorators_task(): + pass + + self.assertTrue(_test_decorators_task.accept_magic_kwargs) + + def test_decorators_periodic_task(self): + @celery.decorators.periodic_task(run_every=3600) + def _test_decorators_ptask(): + pass + + self.assertTrue(_test_decorators_ptask.accept_magic_kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_decorators.py b/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_decorators.py new file mode 100644 index 0000000..9f5dff9 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_decorators.py @@ -0,0 +1,39 @@ +from __future__ import absolute_import + +import warnings + +from celery.task import base + +from celery.tests.case import AppCase, depends_on_current_app + + +def add(x, y): + return x + y + + +@depends_on_current_app +class test_decorators(AppCase): + + def 
test_task_alias(self): + from celery import task + self.assertTrue(task.__file__) + self.assertTrue(task(add)) + + def setup(self): + with warnings.catch_warnings(record=True): + from celery import decorators + self.decorators = decorators + + def assertCompatDecorator(self, decorator, type, **opts): + task = decorator(**opts)(add) + self.assertEqual(task(8, 8), 16) + self.assertTrue(task.accept_magic_kwargs) + self.assertIsInstance(task, type) + + def test_task(self): + self.assertCompatDecorator(self.decorators.task, base.BaseTask) + + def test_periodic_task(self): + self.assertCompatDecorator(self.decorators.periodic_task, + base.BaseTask, + run_every=1) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_http.py b/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_http.py new file mode 100644 index 0000000..08505f8 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/compat_modules/test_http.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import, unicode_literals + +from contextlib import contextmanager +from functools import wraps +try: + from urllib import addinfourl +except ImportError: # py3k + from urllib.request import addinfourl # noqa + +from anyjson import dumps +from kombu.utils.encoding import from_utf8 + +from celery.five import WhateverIO, items +from celery.task import http +from celery.tests.case import AppCase, Case + + +@contextmanager +def mock_urlopen(response_method): + + urlopen = http.urlopen + + @wraps(urlopen) + def _mocked(url, *args, **kwargs): + response_data, headers = response_method(url) + return addinfourl(WhateverIO(response_data), headers, url) + + http.urlopen = _mocked + + try: + yield True + finally: + http.urlopen = urlopen + + +def _response(res): + return lambda r: (res, []) + + +def success_response(value): + return _response(dumps({'status': 'success', 'retval': value})) + + +def fail_response(reason): + return _response(dumps({'status': 'failure', 'reason': reason})) + + +def unknown_response(): + return _response(dumps({'status': 'u.u.u.u', 'retval': True})) + + +class test_encodings(Case): + + def test_utf8dict(self): + uk = 'foobar' + d = {'følelser ær langé': 'ærbadægzaååÆØÅ', + from_utf8(uk): from_utf8('xuzzybaz')} + + for key, value in items(http.utf8dict(items(d))): + self.assertIsInstance(key, str) + self.assertIsInstance(value, str) + + +class test_MutableURL(Case): + + def test_url_query(self): + url = http.MutableURL('http://example.com?x=10&y=20&z=Foo') + self.assertDictContainsSubset({'x': '10', + 'y': '20', + 'z': 'Foo'}, url.query) + url.query['name'] = 'George' + url = http.MutableURL(str(url)) + self.assertDictContainsSubset({'x': '10', + 'y': '20', + 'z': 'Foo', + 'name': 'George'}, url.query) + + def test_url_keeps_everything(self): + url = 'https://e.com:808/foo/bar#zeta?x=10&y=20' + url = http.MutableURL(url) + + self.assertEqual( + str(url).split('?')[0], + 'https://e.com:808/foo/bar#zeta', + ) + + def test___repr__(self): + url = http.MutableURL('http://e.com/foo/bar') + self.assertTrue(repr(url).startswith(' 50: + return True + raise err + finally: + called[0] += 1 + sock.return_value.bind.side_effect = effect + with Rdb(out=out): + pass diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/events/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/events/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_cursesmon.py 
b/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_cursesmon.py new file mode 100644 index 0000000..c8e6151 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_cursesmon.py @@ -0,0 +1,70 @@ +from __future__ import absolute_import + +from celery.tests.case import AppCase, SkipTest + + +class MockWindow(object): + + def getmaxyx(self): + return self.y, self.x + + +class test_CursesDisplay(AppCase): + + def setup(self): + try: + import curses # noqa + except ImportError: + raise SkipTest('curses monitor requires curses') + + from celery.events import cursesmon + self.monitor = cursesmon.CursesMonitor(object(), app=self.app) + self.win = MockWindow() + self.monitor.win = self.win + + def test_format_row_with_default_widths(self): + self.win.x, self.win.y = 91, 24 + row = self.monitor.format_row( + '783da208-77d0-40ca-b3d6-37dd6dbb55d3', + 'task.task.task.task.task.task.task.task.task.tas', + 'workerworkerworkerworkerworkerworkerworkerworker', + '21:13:20', + 'SUCCESS') + self.assertEqual('783da208-77d0-40ca-b3d6-37dd6dbb55d3 ' + 'workerworker... task.task.[.]tas 21:13:20 SUCCESS ', + row) + + def test_format_row_with_truncated_uuid(self): + self.win.x, self.win.y = 80, 24 + row = self.monitor.format_row( + '783da208-77d0-40ca-b3d6-37dd6dbb55d3', + 'task.task.task.task.task.task.task.task.task.tas', + 'workerworkerworkerworkerworkerworkerworkerworker', + '21:13:20', + 'SUCCESS') + self.assertEqual('783da208-77d0-40ca-b3d... workerworker... ' + 'task.task.[.]tas 21:13:20 SUCCESS ', + row) + + def test_format_title_row(self): + self.win.x, self.win.y = 80, 24 + row = self.monitor.format_row('UUID', 'TASK', + 'WORKER', 'TIME', 'STATE') + self.assertEqual('UUID WORKER ' + 'TASK TIME STATE ', + row) + + def test_format_row_for_wide_screen_with_short_uuid(self): + self.win.x, self.win.y = 140, 24 + row = self.monitor.format_row( + '783da208-77d0-40ca-b3d6-37dd6dbb55d3', + 'task.task.task.task.task.task.task.task.task.tas', + 'workerworkerworkerworkerworkerworkerworkerworker', + '21:13:20', + 'SUCCESS') + self.assertEqual(136, len(row)) + self.assertEqual('783da208-77d0-40ca-b3d6-37dd6dbb55d3 ' + 'workerworkerworkerworkerworkerworker... 
' + 'task.task.task.task.task.task.task.[.]tas ' + '21:13:20 SUCCESS ', + row) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_events.py b/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_events.py new file mode 100644 index 0000000..791f416 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_events.py @@ -0,0 +1,260 @@ +from __future__ import absolute_import + +import socket + +from celery.events import Event +from celery.tests.case import AppCase, Mock + + +class MockProducer(object): + raise_on_publish = False + + def __init__(self, *args, **kwargs): + self.sent = [] + + def publish(self, msg, *args, **kwargs): + if self.raise_on_publish: + raise KeyError() + self.sent.append(msg) + + def close(self): + pass + + def has_event(self, kind): + for event in self.sent: + if event['type'] == kind: + return event + return False + + +class test_Event(AppCase): + + def test_constructor(self): + event = Event('world war II') + self.assertEqual(event['type'], 'world war II') + self.assertTrue(event['timestamp']) + + +class test_EventDispatcher(AppCase): + + def test_redis_uses_fanout_exchange(self): + self.app.connection = Mock() + conn = self.app.connection.return_value = Mock() + conn.transport.driver_type = 'redis' + + dispatcher = self.app.events.Dispatcher(conn, enabled=False) + self.assertEqual(dispatcher.exchange.type, 'fanout') + + def test_others_use_topic_exchange(self): + self.app.connection = Mock() + conn = self.app.connection.return_value = Mock() + conn.transport.driver_type = 'amqp' + dispatcher = self.app.events.Dispatcher(conn, enabled=False) + self.assertEqual(dispatcher.exchange.type, 'topic') + + def test_takes_channel_connection(self): + x = self.app.events.Dispatcher(channel=Mock()) + self.assertIs(x.connection, x.channel.connection.client) + + def test_sql_transports_disabled(self): + conn = Mock() + conn.transport.driver_type = 'sql' + x = self.app.events.Dispatcher(connection=conn) + self.assertFalse(x.enabled) + + def test_send(self): + producer = MockProducer() + producer.connection = self.app.connection() + connection = Mock() + connection.transport.driver_type = 'amqp' + eventer = self.app.events.Dispatcher(connection, enabled=False, + buffer_while_offline=False) + eventer.producer = producer + eventer.enabled = True + eventer.send('World War II', ended=True) + self.assertTrue(producer.has_event('World War II')) + eventer.enabled = False + eventer.send('World War III') + self.assertFalse(producer.has_event('World War III')) + + evs = ('Event 1', 'Event 2', 'Event 3') + eventer.enabled = True + eventer.producer.raise_on_publish = True + eventer.buffer_while_offline = False + with self.assertRaises(KeyError): + eventer.send('Event X') + eventer.buffer_while_offline = True + for ev in evs: + eventer.send(ev) + eventer.producer.raise_on_publish = False + eventer.flush() + for ev in evs: + self.assertTrue(producer.has_event(ev)) + + buf = eventer._outbound_buffer = Mock() + buf.popleft.side_effect = IndexError() + eventer.flush() + + def test_enter_exit(self): + with self.app.connection() as conn: + d = self.app.events.Dispatcher(conn) + d.close = Mock() + with d as _d: + self.assertTrue(_d) + d.close.assert_called_with() + + def test_enable_disable_callbacks(self): + on_enable = Mock() + on_disable = Mock() + with self.app.connection() as conn: + with self.app.events.Dispatcher(conn, enabled=False) as d: + d.on_enabled.add(on_enable) + d.on_disabled.add(on_disable) + d.enable() + 
on_enable.assert_called_with() + d.disable() + on_disable.assert_called_with() + + def test_enabled_disable(self): + connection = self.app.connection() + channel = connection.channel() + try: + dispatcher = self.app.events.Dispatcher(connection, + enabled=True) + dispatcher2 = self.app.events.Dispatcher(connection, + enabled=True, + channel=channel) + self.assertTrue(dispatcher.enabled) + self.assertTrue(dispatcher.producer.channel) + self.assertEqual(dispatcher.producer.serializer, + self.app.conf.CELERY_EVENT_SERIALIZER) + + created_channel = dispatcher.producer.channel + dispatcher.disable() + dispatcher.disable() # Disable with no active producer + dispatcher2.disable() + self.assertFalse(dispatcher.enabled) + self.assertIsNone(dispatcher.producer) + self.assertFalse(dispatcher2.channel.closed, + 'does not close manually provided channel') + + dispatcher.enable() + self.assertTrue(dispatcher.enabled) + self.assertTrue(dispatcher.producer) + + # XXX test compat attribute + self.assertIs(dispatcher.publisher, dispatcher.producer) + prev, dispatcher.publisher = dispatcher.producer, 42 + try: + self.assertEqual(dispatcher.producer, 42) + finally: + dispatcher.producer = prev + finally: + channel.close() + connection.close() + self.assertTrue(created_channel.closed) + + +class test_EventReceiver(AppCase): + + def test_process(self): + + message = {'type': 'world-war'} + + got_event = [False] + + def my_handler(event): + got_event[0] = True + + connection = Mock() + connection.transport_cls = 'memory' + r = self.app.events.Receiver( + connection, + handlers={'world-war': my_handler}, + node_id='celery.tests', + ) + r._receive(message, object()) + self.assertTrue(got_event[0]) + + def test_catch_all_event(self): + + message = {'type': 'world-war'} + + got_event = [False] + + def my_handler(event): + got_event[0] = True + + connection = Mock() + connection.transport_cls = 'memory' + r = self.app.events.Receiver(connection, node_id='celery.tests') + r.handlers['*'] = my_handler + r._receive(message, object()) + self.assertTrue(got_event[0]) + + def test_itercapture(self): + connection = self.app.connection() + try: + r = self.app.events.Receiver(connection, node_id='celery.tests') + it = r.itercapture(timeout=0.0001, wakeup=False) + + with self.assertRaises(socket.timeout): + next(it) + + with self.assertRaises(socket.timeout): + r.capture(timeout=0.00001) + finally: + connection.close() + + def test_event_from_message_localize_disabled(self): + r = self.app.events.Receiver(Mock(), node_id='celery.tests') + r.adjust_clock = Mock() + ts_adjust = Mock() + + r.event_from_message( + {'type': 'worker-online', 'clock': 313}, + localize=False, + adjust_timestamp=ts_adjust, + ) + self.assertFalse(ts_adjust.called) + r.adjust_clock.assert_called_with(313) + + def test_itercapture_limit(self): + connection = self.app.connection() + channel = connection.channel() + try: + events_received = [0] + + def handler(event): + events_received[0] += 1 + + producer = self.app.events.Dispatcher( + connection, enabled=True, channel=channel, + ) + r = self.app.events.Receiver( + connection, + handlers={'*': handler}, + node_id='celery.tests', + ) + evs = ['ev1', 'ev2', 'ev3', 'ev4', 'ev5'] + for ev in evs: + producer.send(ev) + it = r.itercapture(limit=4, wakeup=True) + next(it) # skip consumer (see itercapture) + list(it) + self.assertEqual(events_received[0], 4) + finally: + channel.close() + connection.close() + + +class test_misc(AppCase): + + def test_State(self): + state = self.app.events.State() + 
self.assertDictEqual(dict(state.workers), {}) + + def test_default_dispatcher(self): + with self.app.events.default_dispatcher() as d: + self.assertTrue(d) + self.assertTrue(d.connection) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_snapshot.py b/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_snapshot.py new file mode 100644 index 0000000..f551751 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_snapshot.py @@ -0,0 +1,130 @@ +from __future__ import absolute_import + +from celery.events import Events +from celery.events.snapshot import Polaroid, evcam +from celery.tests.case import AppCase, patch, restore_logging + + +class TRef(object): + active = True + called = False + + def __call__(self): + self.called = True + + def cancel(self): + self.active = False + + +class MockTimer(object): + installed = [] + + def call_repeatedly(self, secs, fun, *args, **kwargs): + self.installed.append(fun) + return TRef() +timer = MockTimer() + + +class test_Polaroid(AppCase): + + def setup(self): + self.state = self.app.events.State() + + def test_constructor(self): + x = Polaroid(self.state, app=self.app) + self.assertIs(x.app, self.app) + self.assertIs(x.state, self.state) + self.assertTrue(x.freq) + self.assertTrue(x.cleanup_freq) + self.assertTrue(x.logger) + self.assertFalse(x.maxrate) + + def test_install_timers(self): + x = Polaroid(self.state, app=self.app) + x.timer = timer + x.__exit__() + x.__enter__() + self.assertIn(x.capture, MockTimer.installed) + self.assertIn(x.cleanup, MockTimer.installed) + self.assertTrue(x._tref.active) + self.assertTrue(x._ctref.active) + x.__exit__() + self.assertFalse(x._tref.active) + self.assertFalse(x._ctref.active) + self.assertTrue(x._tref.called) + self.assertFalse(x._ctref.called) + + def test_cleanup(self): + x = Polaroid(self.state, app=self.app) + cleanup_signal_sent = [False] + + def handler(**kwargs): + cleanup_signal_sent[0] = True + + x.cleanup_signal.connect(handler) + x.cleanup() + self.assertTrue(cleanup_signal_sent[0]) + + def test_shutter__capture(self): + x = Polaroid(self.state, app=self.app) + shutter_signal_sent = [False] + + def handler(**kwargs): + shutter_signal_sent[0] = True + + x.shutter_signal.connect(handler) + x.shutter() + self.assertTrue(shutter_signal_sent[0]) + + shutter_signal_sent[0] = False + x.capture() + self.assertTrue(shutter_signal_sent[0]) + + def test_shutter_maxrate(self): + x = Polaroid(self.state, app=self.app, maxrate='1/h') + shutter_signal_sent = [0] + + def handler(**kwargs): + shutter_signal_sent[0] += 1 + + x.shutter_signal.connect(handler) + for i in range(30): + x.shutter() + x.shutter() + x.shutter() + self.assertEqual(shutter_signal_sent[0], 1) + + +class test_evcam(AppCase): + + class MockReceiver(object): + raise_keyboard_interrupt = False + + def capture(self, **kwargs): + if self.__class__.raise_keyboard_interrupt: + raise KeyboardInterrupt() + + class MockEvents(Events): + + def Receiver(self, *args, **kwargs): + return test_evcam.MockReceiver() + + def setup(self): + self.app.events = self.MockEvents() + self.app.events.app = self.app + + def test_evcam(self): + with restore_logging(): + evcam(Polaroid, timer=timer, app=self.app) + evcam(Polaroid, timer=timer, loglevel='CRITICAL', app=self.app) + self.MockReceiver.raise_keyboard_interrupt = True + try: + with self.assertRaises(SystemExit): + evcam(Polaroid, timer=timer, app=self.app) + finally: + self.MockReceiver.raise_keyboard_interrupt = False + + 
@patch('celery.platforms.create_pidlock') + def test_evcam_pidfile(self, create_pidlock): + evcam(Polaroid, timer=timer, pidfile='/var/pid', app=self.app) + create_pidlock.assert_called_with('/var/pid') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_state.py b/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_state.py new file mode 100644 index 0000000..aab54c4 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/events/test_state.py @@ -0,0 +1,582 @@ +from __future__ import absolute_import + +import pickle + +from decimal import Decimal +from random import shuffle +from time import time +from itertools import count + +from celery import states +from celery.events import Event +from celery.events.state import ( + State, + Worker, + Task, + HEARTBEAT_EXPIRE_WINDOW, + HEARTBEAT_DRIFT_MAX, +) +from celery.five import range +from celery.utils import uuid +from celery.tests.case import AppCase, Mock, patch + +try: + Decimal(2.6) +except TypeError: # pragma: no cover + # Py2.6: Must first convert float to str + _float_to_decimal = str +else: + def _float_to_decimal(f): # noqa + return f + + +class replay(object): + + def __init__(self, state): + self.state = state + self.rewind() + self.setup() + self.current_clock = 0 + + def setup(self): + pass + + def next_event(self): + ev = self.events[next(self.position)] + ev['local_received'] = ev['timestamp'] + try: + self.current_clock = ev['clock'] + except KeyError: + ev['clock'] = self.current_clock = self.current_clock + 1 + return ev + + def __iter__(self): + return self + + def __next__(self): + try: + self.state.event(self.next_event()) + except IndexError: + raise StopIteration() + next = __next__ + + def rewind(self): + self.position = count(0) + return self + + def play(self): + for _ in self: + pass + + +class ev_worker_online_offline(replay): + + def setup(self): + self.events = [ + Event('worker-online', hostname='utest1'), + Event('worker-offline', hostname='utest1'), + ] + + +class ev_worker_heartbeats(replay): + + def setup(self): + self.events = [ + Event('worker-heartbeat', hostname='utest1', + timestamp=time() - HEARTBEAT_EXPIRE_WINDOW * 2), + Event('worker-heartbeat', hostname='utest1'), + ] + + +class ev_task_states(replay): + + def setup(self): + tid = self.tid = uuid() + self.events = [ + Event('task-received', uuid=tid, name='task1', + args='(2, 2)', kwargs="{'foo': 'bar'}", + retries=0, eta=None, hostname='utest1'), + Event('task-started', uuid=tid, hostname='utest1'), + Event('task-revoked', uuid=tid, hostname='utest1'), + Event('task-retried', uuid=tid, exception="KeyError('bar')", + traceback='line 2 at main', hostname='utest1'), + Event('task-failed', uuid=tid, exception="KeyError('foo')", + traceback='line 1 at main', hostname='utest1'), + Event('task-succeeded', uuid=tid, result='4', + runtime=0.1234, hostname='utest1'), + ] + + +def QTEV(type, uuid, hostname, clock, name=None, timestamp=None): + """Quick task event.""" + return Event('task-{0}'.format(type), uuid=uuid, hostname=hostname, + clock=clock, name=name, timestamp=timestamp or time()) + + +class ev_logical_clock_ordering(replay): + + def __init__(self, state, offset=0, uids=None): + self.offset = offset or 0 + self.uids = self.setuids(uids) + super(ev_logical_clock_ordering, self).__init__(state) + + def setuids(self, uids): + uids = self.tA, self.tB, self.tC = uids or [uuid(), uuid(), uuid()] + return uids + + def setup(self): + offset = self.offset + tA, tB, tC = self.uids + self.events = [ + 
QTEV('received', tA, 'w1', name='tA', clock=offset + 1), + QTEV('received', tB, 'w2', name='tB', clock=offset + 1), + QTEV('started', tA, 'w1', name='tA', clock=offset + 3), + QTEV('received', tC, 'w2', name='tC', clock=offset + 3), + QTEV('started', tB, 'w2', name='tB', clock=offset + 5), + QTEV('retried', tA, 'w1', name='tA', clock=offset + 7), + QTEV('succeeded', tB, 'w2', name='tB', clock=offset + 9), + QTEV('started', tC, 'w2', name='tC', clock=offset + 10), + QTEV('received', tA, 'w3', name='tA', clock=offset + 13), + QTEV('succeeded', tC, 'w2', name='tC', clock=offset + 12), + QTEV('started', tA, 'w3', name='tA', clock=offset + 14), + QTEV('succeeded', tA, 'w3', name='TA', clock=offset + 16), + ] + + def rewind_with_offset(self, offset, uids=None): + self.offset = offset + self.uids = self.setuids(uids or self.uids) + self.setup() + self.rewind() + + +class ev_snapshot(replay): + + def setup(self): + self.events = [ + Event('worker-online', hostname='utest1'), + Event('worker-online', hostname='utest2'), + Event('worker-online', hostname='utest3'), + ] + for i in range(20): + worker = not i % 2 and 'utest2' or 'utest1' + type = not i % 2 and 'task2' or 'task1' + self.events.append(Event('task-received', name=type, + uuid=uuid(), hostname=worker)) + + +class test_Worker(AppCase): + + def test_equality(self): + self.assertEqual(Worker(hostname='foo').hostname, 'foo') + self.assertEqual( + Worker(hostname='foo'), Worker(hostname='foo'), + ) + self.assertNotEqual( + Worker(hostname='foo'), Worker(hostname='bar'), + ) + self.assertEqual( + hash(Worker(hostname='foo')), hash(Worker(hostname='foo')), + ) + self.assertNotEqual( + hash(Worker(hostname='foo')), hash(Worker(hostname='bar')), + ) + + def test_compatible_with_Decimal(self): + w = Worker('george@vandelay.com') + timestamp, local_received = Decimal(_float_to_decimal(time())), time() + w.event('worker-online', timestamp, local_received, fields={ + 'hostname': 'george@vandelay.com', + 'timestamp': timestamp, + 'local_received': local_received, + 'freq': Decimal(_float_to_decimal(5.6335431)), + }) + self.assertTrue(w.alive) + + def test_survives_missing_timestamp(self): + worker = Worker(hostname='foo') + worker.event('heartbeat') + self.assertEqual(worker.heartbeats, []) + + def test_repr(self): + self.assertTrue(repr(Worker(hostname='foo'))) + + def test_drift_warning(self): + worker = Worker(hostname='foo') + with patch('celery.events.state.warn') as warn: + worker.event(None, time() + (HEARTBEAT_DRIFT_MAX * 2), time()) + self.assertTrue(warn.called) + self.assertIn('Substantial drift', warn.call_args[0][0]) + + def test_updates_heartbeat(self): + worker = Worker(hostname='foo') + worker.event(None, time(), time()) + self.assertEqual(len(worker.heartbeats), 1) + h1 = worker.heartbeats[0] + worker.event(None, time(), time() - 10) + self.assertEqual(len(worker.heartbeats), 2) + self.assertEqual(worker.heartbeats[-1], h1) + + +class test_Task(AppCase): + + def test_equality(self): + self.assertEqual(Task(uuid='foo').uuid, 'foo') + self.assertEqual( + Task(uuid='foo'), Task(uuid='foo'), + ) + self.assertNotEqual( + Task(uuid='foo'), Task(uuid='bar'), + ) + self.assertEqual( + hash(Task(uuid='foo')), hash(Task(uuid='foo')), + ) + self.assertNotEqual( + hash(Task(uuid='foo')), hash(Task(uuid='bar')), + ) + + def test_info(self): + task = Task(uuid='abcdefg', + name='tasks.add', + args='(2, 2)', + kwargs='{}', + retries=2, + result=42, + eta=1, + runtime=0.0001, + expires=1, + foo=None, + exception=1, + received=time() - 10, + 
started=time() - 8, + exchange='celery', + routing_key='celery', + succeeded=time()) + self.assertEqual(sorted(list(task._info_fields)), + sorted(task.info().keys())) + + self.assertEqual(sorted(list(task._info_fields + ('received', ))), + sorted(task.info(extra=('received', )))) + + self.assertEqual(sorted(['args', 'kwargs']), + sorted(task.info(['args', 'kwargs']).keys())) + self.assertFalse(list(task.info('foo'))) + + def test_ready(self): + task = Task(uuid='abcdefg', + name='tasks.add') + task.event('received', time(), time()) + self.assertFalse(task.ready) + task.event('succeeded', time(), time()) + self.assertTrue(task.ready) + + def test_sent(self): + task = Task(uuid='abcdefg', + name='tasks.add') + task.event('sent', time(), time()) + self.assertEqual(task.state, states.PENDING) + + def test_merge(self): + task = Task() + task.event('failed', time(), time()) + task.event('started', time(), time()) + task.event('received', time(), time(), { + 'name': 'tasks.add', 'args': (2, 2), + }) + self.assertEqual(task.state, states.FAILURE) + self.assertEqual(task.name, 'tasks.add') + self.assertTupleEqual(task.args, (2, 2)) + task.event('retried', time(), time()) + self.assertEqual(task.state, states.RETRY) + + def test_repr(self): + self.assertTrue(repr(Task(uuid='xxx', name='tasks.add'))) + + +class test_State(AppCase): + + def test_repr(self): + self.assertTrue(repr(State())) + + def test_pickleable(self): + self.assertTrue(pickle.loads(pickle.dumps(State()))) + + def test_task_logical_clock_ordering(self): + state = State() + r = ev_logical_clock_ordering(state) + tA, tB, tC = r.uids + r.play() + now = list(state.tasks_by_time()) + self.assertEqual(now[0][0], tA) + self.assertEqual(now[1][0], tC) + self.assertEqual(now[2][0], tB) + for _ in range(1000): + shuffle(r.uids) + tA, tB, tC = r.uids + r.rewind_with_offset(r.current_clock + 1, r.uids) + r.play() + now = list(state.tasks_by_time()) + self.assertEqual(now[0][0], tA) + self.assertEqual(now[1][0], tC) + self.assertEqual(now[2][0], tB) + + def test_worker_online_offline(self): + r = ev_worker_online_offline(State()) + next(r) + self.assertTrue(r.state.alive_workers()) + self.assertTrue(r.state.workers['utest1'].alive) + r.play() + self.assertFalse(r.state.alive_workers()) + self.assertFalse(r.state.workers['utest1'].alive) + + def test_itertasks(self): + s = State() + s.tasks = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'} + self.assertEqual(len(list(s.itertasks(limit=2))), 2) + + def test_worker_heartbeat_expire(self): + r = ev_worker_heartbeats(State()) + next(r) + self.assertFalse(r.state.alive_workers()) + self.assertFalse(r.state.workers['utest1'].alive) + r.play() + self.assertTrue(r.state.alive_workers()) + self.assertTrue(r.state.workers['utest1'].alive) + + def test_task_states(self): + r = ev_task_states(State()) + + # RECEIVED + next(r) + self.assertTrue(r.tid in r.state.tasks) + task = r.state.tasks[r.tid] + self.assertEqual(task.state, states.RECEIVED) + self.assertTrue(task.received) + self.assertEqual(task.timestamp, task.received) + self.assertEqual(task.worker.hostname, 'utest1') + + # STARTED + next(r) + self.assertTrue(r.state.workers['utest1'].alive, + 'any task event adds worker heartbeat') + self.assertEqual(task.state, states.STARTED) + self.assertTrue(task.started) + self.assertEqual(task.timestamp, task.started) + self.assertEqual(task.worker.hostname, 'utest1') + + # REVOKED + next(r) + self.assertEqual(task.state, states.REVOKED) + self.assertTrue(task.revoked) + self.assertEqual(task.timestamp, task.revoked) + 
self.assertEqual(task.worker.hostname, 'utest1') + + # RETRY + next(r) + self.assertEqual(task.state, states.RETRY) + self.assertTrue(task.retried) + self.assertEqual(task.timestamp, task.retried) + self.assertEqual(task.worker.hostname, 'utest1') + self.assertEqual(task.exception, "KeyError('bar')") + self.assertEqual(task.traceback, 'line 2 at main') + + # FAILURE + next(r) + self.assertEqual(task.state, states.FAILURE) + self.assertTrue(task.failed) + self.assertEqual(task.timestamp, task.failed) + self.assertEqual(task.worker.hostname, 'utest1') + self.assertEqual(task.exception, "KeyError('foo')") + self.assertEqual(task.traceback, 'line 1 at main') + + # SUCCESS + next(r) + self.assertEqual(task.state, states.SUCCESS) + self.assertTrue(task.succeeded) + self.assertEqual(task.timestamp, task.succeeded) + self.assertEqual(task.worker.hostname, 'utest1') + self.assertEqual(task.result, '4') + self.assertEqual(task.runtime, 0.1234) + + def assertStateEmpty(self, state): + self.assertFalse(state.tasks) + self.assertFalse(state.workers) + self.assertFalse(state.event_count) + self.assertFalse(state.task_count) + + def assertState(self, state): + self.assertTrue(state.tasks) + self.assertTrue(state.workers) + self.assertTrue(state.event_count) + self.assertTrue(state.task_count) + + def test_freeze_while(self): + s = State() + r = ev_snapshot(s) + r.play() + + def work(): + pass + + s.freeze_while(work, clear_after=True) + self.assertFalse(s.event_count) + + s2 = State() + r = ev_snapshot(s2) + r.play() + s2.freeze_while(work, clear_after=False) + self.assertTrue(s2.event_count) + + def test_clear_tasks(self): + s = State() + r = ev_snapshot(s) + r.play() + self.assertTrue(s.tasks) + s.clear_tasks(ready=False) + self.assertFalse(s.tasks) + + def test_clear(self): + r = ev_snapshot(State()) + r.play() + self.assertTrue(r.state.event_count) + self.assertTrue(r.state.workers) + self.assertTrue(r.state.tasks) + self.assertTrue(r.state.task_count) + + r.state.clear() + self.assertFalse(r.state.event_count) + self.assertFalse(r.state.workers) + self.assertTrue(r.state.tasks) + self.assertFalse(r.state.task_count) + + r.state.clear(False) + self.assertFalse(r.state.tasks) + + def test_task_types(self): + r = ev_snapshot(State()) + r.play() + self.assertEqual(sorted(r.state.task_types()), ['task1', 'task2']) + + def test_tasks_by_timestamp(self): + r = ev_snapshot(State()) + r.play() + self.assertEqual(len(list(r.state.tasks_by_timestamp())), 20) + + def test_tasks_by_type(self): + r = ev_snapshot(State()) + r.play() + self.assertEqual(len(list(r.state.tasks_by_type('task1'))), 10) + self.assertEqual(len(list(r.state.tasks_by_type('task2'))), 10) + + def test_alive_workers(self): + r = ev_snapshot(State()) + r.play() + self.assertEqual(len(r.state.alive_workers()), 3) + + def test_tasks_by_worker(self): + r = ev_snapshot(State()) + r.play() + self.assertEqual(len(list(r.state.tasks_by_worker('utest1'))), 10) + self.assertEqual(len(list(r.state.tasks_by_worker('utest2'))), 10) + + def test_survives_unknown_worker_event(self): + s = State() + s.event({ + 'type': 'worker-unknown-event-xxx', + 'foo': 'bar', + }) + s.event({ + 'type': 'worker-unknown-event-xxx', + 'hostname': 'xxx', + 'foo': 'bar', + }) + + def test_survives_unknown_worker_leaving(self): + s = State(on_node_leave=Mock(name='on_node_leave')) + (worker, created), subject = s.event({ + 'type': 'worker-offline', + 'hostname': 'unknown@vandelay.com', + 'timestamp': time(), + 'local_received': time(), + 'clock': 301030134894833, + }) + 
self.assertEqual(worker, Worker('unknown@vandelay.com')) + self.assertFalse(created) + self.assertEqual(subject, 'offline') + self.assertNotIn('unknown@vandelay.com', s.workers) + s.on_node_leave.assert_called_with(worker) + + def test_on_node_join_callback(self): + s = State(on_node_join=Mock(name='on_node_join')) + (worker, created), subject = s.event({ + 'type': 'worker-online', + 'hostname': 'george@vandelay.com', + 'timestamp': time(), + 'local_received': time(), + 'clock': 34314, + }) + self.assertTrue(worker) + self.assertTrue(created) + self.assertEqual(subject, 'online') + self.assertIn('george@vandelay.com', s.workers) + s.on_node_join.assert_called_with(worker) + + def test_survives_unknown_task_event(self): + s = State() + s.event( + { + 'type': 'task-unknown-event-xxx', + 'foo': 'bar', + 'uuid': 'x', + 'hostname': 'y', + 'timestamp': time(), + 'local_received': time(), + 'clock': 0, + }, + ) + + def test_limits_maxtasks(self): + s = State(max_tasks_in_memory=1) + s.heap_multiplier = 2 + s.event({ + 'type': 'task-unknown-event-xxx', + 'foo': 'bar', + 'uuid': 'x', + 'hostname': 'y', + 'clock': 3, + 'timestamp': time(), + 'local_received': time(), + }) + s.event({ + 'type': 'task-unknown-event-xxx', + 'foo': 'bar', + 'uuid': 'y', + 'hostname': 'y', + 'clock': 4, + 'timestamp': time(), + 'local_received': time(), + }) + s.event({ + 'type': 'task-unknown-event-xxx', + 'foo': 'bar', + 'uuid': 'z', + 'hostname': 'y', + 'clock': 5, + 'timestamp': time(), + 'local_received': time(), + }) + self.assertEqual(len(s._taskheap), 2) + self.assertEqual(s._taskheap[0].clock, 4) + self.assertEqual(s._taskheap[1].clock, 5) + + s._taskheap.append(s._taskheap[0]) + self.assertTrue(list(s.tasks_by_time())) + + def test_callback(self): + scratch = {} + + def callback(state, event): + scratch['recv'] = True + + s = State(callback=callback) + s.event({'type': 'worker-online'}) + self.assertTrue(scratch.get('recv')) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/fixups/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/fixups/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/fixups/test_django.py b/thesisenv/lib/python3.6/site-packages/celery/tests/fixups/test_django.py new file mode 100644 index 0000000..94b755e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/fixups/test_django.py @@ -0,0 +1,301 @@ +from __future__ import absolute_import + +import os + +from contextlib import contextmanager + +from celery.fixups.django import ( + _maybe_close_fd, + fixup, + DjangoFixup, + DjangoWorkerFixup, +) + +from celery.tests.case import ( + AppCase, Mock, patch, patch_many, patch_modules, mask_modules, +) + + +class FixupCase(AppCase): + Fixup = None + + @contextmanager + def fixup_context(self, app): + with patch('celery.fixups.django.DjangoWorkerFixup.validate_models'): + with patch('celery.fixups.django.symbol_by_name') as symbyname: + with patch('celery.fixups.django.import_module') as impmod: + f = self.Fixup(app) + yield f, impmod, symbyname + + +class test_DjangoFixup(FixupCase): + Fixup = DjangoFixup + + def test_fixup(self): + with patch('celery.fixups.django.DjangoFixup') as Fixup: + with patch.dict(os.environ, DJANGO_SETTINGS_MODULE=''): + fixup(self.app) + self.assertFalse(Fixup.called) + with patch.dict(os.environ, DJANGO_SETTINGS_MODULE='settings'): + with mask_modules('django'): + with self.assertWarnsRegex(UserWarning, 'but Django is'): + fixup(self.app) + 
self.assertFalse(Fixup.called) + with patch_modules('django'): + fixup(self.app) + self.assertTrue(Fixup.called) + + def test_maybe_close_fd(self): + with patch('os.close'): + _maybe_close_fd(Mock()) + _maybe_close_fd(object()) + + def test_init(self): + with self.fixup_context(self.app) as (f, importmod, sym): + self.assertTrue(f) + + def se(name): + if name == 'django.utils.timezone:now': + raise ImportError() + return Mock() + sym.side_effect = se + self.assertTrue(self.Fixup(self.app)._now) + + def test_install(self): + self.app.loader = Mock() + with self.fixup_context(self.app) as (f, _, _): + with patch_many('os.getcwd', 'sys.path', + 'celery.fixups.django.signals') as (cw, p, sigs): + cw.return_value = '/opt/vandelay' + f.install() + sigs.worker_init.connect.assert_called_with(f.on_worker_init) + self.assertEqual(self.app.loader.now, f.now) + self.assertEqual(self.app.loader.mail_admins, f.mail_admins) + p.append.assert_called_with('/opt/vandelay') + + def test_now(self): + with self.fixup_context(self.app) as (f, _, _): + self.assertTrue(f.now(utc=True)) + self.assertFalse(f._now.called) + self.assertTrue(f.now(utc=False)) + self.assertTrue(f._now.called) + + def test_mail_admins(self): + with self.fixup_context(self.app) as (f, _, _): + f.mail_admins('sub', 'body', True) + f._mail_admins.assert_called_with( + 'sub', 'body', fail_silently=True, + ) + + def test_on_worker_init(self): + with self.fixup_context(self.app) as (f, _, _): + with patch('celery.fixups.django.DjangoWorkerFixup') as DWF: + f.on_worker_init() + DWF.assert_called_with(f.app) + DWF.return_value.install.assert_called_with() + self.assertIs(f._worker_fixup, DWF.return_value) + + +class test_DjangoWorkerFixup(FixupCase): + Fixup = DjangoWorkerFixup + + def test_init(self): + with self.fixup_context(self.app) as (f, importmod, sym): + self.assertTrue(f) + + def se(name): + if name == 'django.db:close_old_connections': + raise ImportError() + return Mock() + sym.side_effect = se + self.assertIsNone(self.Fixup(self.app)._close_old_connections) + + def test_install(self): + self.app.conf = {'CELERY_DB_REUSE_MAX': None} + self.app.loader = Mock() + with self.fixup_context(self.app) as (f, _, _): + with patch_many('celery.fixups.django.signals') as (sigs, ): + f.install() + sigs.beat_embedded_init.connect.assert_called_with( + f.close_database, + ) + sigs.worker_ready.connect.assert_called_with(f.on_worker_ready) + sigs.task_prerun.connect.assert_called_with(f.on_task_prerun) + sigs.task_postrun.connect.assert_called_with(f.on_task_postrun) + sigs.worker_process_init.connect.assert_called_with( + f.on_worker_process_init, + ) + + def test_on_worker_process_init(self): + with self.fixup_context(self.app) as (f, _, _): + with patch('celery.fixups.django._maybe_close_fd') as mcf: + _all = f._db.connections.all = Mock() + conns = _all.return_value = [ + Mock(), Mock(), + ] + conns[0].connection = None + with patch.object(f, 'close_cache'): + with patch.object(f, '_close_database'): + f.on_worker_process_init() + mcf.assert_called_with(conns[1].connection) + f.close_cache.assert_called_with() + f._close_database.assert_called_with() + + mcf.reset_mock() + _all.side_effect = AttributeError() + f.on_worker_process_init() + mcf.assert_called_with(f._db.connection.connection) + f._db.connection = None + f.on_worker_process_init() + + def test_on_task_prerun(self): + task = Mock() + with self.fixup_context(self.app) as (f, _, _): + task.request.is_eager = False + with patch.object(f, 'close_database'): + f.on_task_prerun(task) + 
f.close_database.assert_called_with() + + task.request.is_eager = True + with patch.object(f, 'close_database'): + f.on_task_prerun(task) + self.assertFalse(f.close_database.called) + + def test_on_task_postrun(self): + task = Mock() + with self.fixup_context(self.app) as (f, _, _): + with patch.object(f, 'close_cache'): + task.request.is_eager = False + with patch.object(f, 'close_database'): + f.on_task_postrun(task) + self.assertTrue(f.close_database.called) + self.assertTrue(f.close_cache.called) + + # when a task is eager, do not close connections + with patch.object(f, 'close_cache'): + task.request.is_eager = True + with patch.object(f, 'close_database'): + f.on_task_postrun(task) + self.assertFalse(f.close_database.called) + self.assertFalse(f.close_cache.called) + + def test_close_database(self): + with self.fixup_context(self.app) as (f, _, _): + f._close_old_connections = Mock() + f.close_database() + f._close_old_connections.assert_called_with() + f._close_old_connections = None + with patch.object(f, '_close_database') as _close: + f.db_reuse_max = None + f.close_database() + _close.assert_called_with() + _close.reset_mock() + + f.db_reuse_max = 10 + f._db_recycles = 3 + f.close_database() + self.assertFalse(_close.called) + self.assertEqual(f._db_recycles, 4) + _close.reset_mock() + + f._db_recycles = 20 + f.close_database() + _close.assert_called_with() + self.assertEqual(f._db_recycles, 1) + + def test__close_database(self): + with self.fixup_context(self.app) as (f, _, _): + conns = [Mock(), Mock(), Mock()] + conns[1].close.side_effect = KeyError('already closed') + f.database_errors = (KeyError, ) + + f._db.connections = Mock() # ConnectionHandler + f._db.connections.all.side_effect = lambda: conns + + f._close_database() + conns[0].close.assert_called_with() + conns[1].close.assert_called_with() + conns[2].close.assert_called_with() + + conns[1].close.side_effect = KeyError('omg') + with self.assertRaises(KeyError): + f._close_database() + + class Object(object): + pass + o = Object() + o.close_connection = Mock() + f._db = o + f._close_database() + o.close_connection.assert_called_with() + + def test_close_cache(self): + with self.fixup_context(self.app) as (f, _, _): + f.close_cache() + f._cache.cache.close.assert_called_with() + f._cache.cache.close.side_effect = TypeError() + f.close_cache() + + def test_on_worker_ready(self): + with self.fixup_context(self.app) as (f, _, _): + f._settings.DEBUG = False + f.on_worker_ready() + with self.assertWarnsRegex(UserWarning, r'leads to a memory leak'): + f._settings.DEBUG = True + f.on_worker_ready() + + def test_mysql_errors(self): + with patch_modules('MySQLdb'): + import MySQLdb as mod + mod.DatabaseError = Mock() + mod.InterfaceError = Mock() + mod.OperationalError = Mock() + with self.fixup_context(self.app) as (f, _, _): + self.assertIn(mod.DatabaseError, f.database_errors) + self.assertIn(mod.InterfaceError, f.database_errors) + self.assertIn(mod.OperationalError, f.database_errors) + with mask_modules('MySQLdb'): + with self.fixup_context(self.app): + pass + + def test_pg_errors(self): + with patch_modules('psycopg2'): + import psycopg2 as mod + mod.DatabaseError = Mock() + mod.InterfaceError = Mock() + mod.OperationalError = Mock() + with self.fixup_context(self.app) as (f, _, _): + self.assertIn(mod.DatabaseError, f.database_errors) + self.assertIn(mod.InterfaceError, f.database_errors) + self.assertIn(mod.OperationalError, f.database_errors) + with mask_modules('psycopg2'): + with self.fixup_context(self.app): + 
pass + + def test_sqlite_errors(self): + with patch_modules('sqlite3'): + import sqlite3 as mod + mod.DatabaseError = Mock() + mod.InterfaceError = Mock() + mod.OperationalError = Mock() + with self.fixup_context(self.app) as (f, _, _): + self.assertIn(mod.DatabaseError, f.database_errors) + self.assertIn(mod.InterfaceError, f.database_errors) + self.assertIn(mod.OperationalError, f.database_errors) + with mask_modules('sqlite3'): + with self.fixup_context(self.app): + pass + + def test_oracle_errors(self): + with patch_modules('cx_Oracle'): + import cx_Oracle as mod + mod.DatabaseError = Mock() + mod.InterfaceError = Mock() + mod.OperationalError = Mock() + with self.fixup_context(self.app) as (f, _, _): + self.assertIn(mod.DatabaseError, f.database_errors) + self.assertIn(mod.InterfaceError, f.database_errors) + self.assertIn(mod.OperationalError, f.database_errors) + with mask_modules('cx_Oracle'): + with self.fixup_context(self.app): + pass diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/functional/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/functional/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/functional/case.py b/thesisenv/lib/python3.6/site-packages/celery/tests/functional/case.py new file mode 100644 index 0000000..298c684 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/functional/case.py @@ -0,0 +1,178 @@ +from __future__ import absolute_import + +import atexit +import logging +import os +import signal +import socket +import sys +import traceback + +from itertools import count +from time import time + +from celery import current_app +from celery.exceptions import TimeoutError +from celery.app.control import flatten_reply +from celery.utils.imports import qualname + +from celery.tests.case import Case + +HOSTNAME = socket.gethostname() + + +def say(msg): + sys.stderr.write('%s\n' % msg) + + +def try_while(fun, reason='Timed out', timeout=10, interval=0.5): + time_start = time() + for iterations in count(0): + if time() - time_start >= timeout: + raise TimeoutError() + ret = fun() + if ret: + return ret + + +class Worker(object): + started = False + worker_ids = count(1) + _shutdown_called = False + + def __init__(self, hostname, loglevel='error', app=None): + self.hostname = hostname + self.loglevel = loglevel + self.app = app or current_app._get_current_object() + + def start(self): + if not self.started: + self._fork_and_exec() + self.started = True + + def _fork_and_exec(self): + pid = os.fork() + if pid == 0: + self.app.worker_main(['worker', '--loglevel=INFO', + '-n', self.hostname, + '-P', 'solo']) + os._exit(0) + self.pid = pid + + def ping(self, *args, **kwargs): + return self.app.control.ping(*args, **kwargs) + + def is_alive(self, timeout=1): + r = self.ping(destination=[self.hostname], timeout=timeout) + return self.hostname in flatten_reply(r) + + def wait_until_started(self, timeout=10, interval=0.5): + try_while( + lambda: self.is_alive(interval), + "Worker won't start (after %s secs.)" % timeout, + interval=interval, timeout=timeout, + ) + say('--WORKER %s IS ONLINE--' % self.hostname) + + def ensure_shutdown(self, timeout=10, interval=0.5): + os.kill(self.pid, signal.SIGTERM) + try_while( + lambda: not self.is_alive(interval), + "Worker won't shutdown (after %s secs.)" % timeout, + timeout=10, interval=0.5, + ) + say('--WORKER %s IS SHUTDOWN--' % self.hostname) + self._shutdown_called = True + + def ensure_started(self): + 
self.start() + self.wait_until_started() + + @classmethod + def managed(cls, hostname=None, caller=None): + hostname = hostname or socket.gethostname() + if caller: + hostname = '.'.join([qualname(caller), hostname]) + else: + hostname += str(next(cls.worker_ids)) + worker = cls(hostname) + worker.ensure_started() + stack = traceback.format_stack() + + @atexit.register + def _ensure_shutdown_once(): + if not worker._shutdown_called: + say('-- Found worker not stopped at shutdown: %s\n%s' % ( + worker.hostname, + '\n'.join(stack))) + worker.ensure_shutdown() + + return worker + + +class WorkerCase(Case): + hostname = HOSTNAME + worker = None + + @classmethod + def setUpClass(cls): + logging.getLogger('amqp').setLevel(logging.ERROR) + cls.worker = Worker.managed(cls.hostname, caller=cls) + + @classmethod + def tearDownClass(cls): + cls.worker.ensure_shutdown() + + def assertWorkerAlive(self, timeout=1): + self.assertTrue(self.worker.is_alive) + + def inspect(self, timeout=1): + return self.app.control.inspect([self.worker.hostname], + timeout=timeout) + + def my_response(self, response): + return flatten_reply(response)[self.worker.hostname] + + def is_accepted(self, task_id, interval=0.5): + active = self.inspect(timeout=interval).active() + if active: + for task in active[self.worker.hostname]: + if task['id'] == task_id: + return True + return False + + def is_reserved(self, task_id, interval=0.5): + reserved = self.inspect(timeout=interval).reserved() + if reserved: + for task in reserved[self.worker.hostname]: + if task['id'] == task_id: + return True + return False + + def is_scheduled(self, task_id, interval=0.5): + schedule = self.inspect(timeout=interval).scheduled() + if schedule: + for item in schedule[self.worker.hostname]: + if item['request']['id'] == task_id: + return True + return False + + def is_received(self, task_id, interval=0.5): + return (self.is_reserved(task_id, interval) or + self.is_scheduled(task_id, interval) or + self.is_accepted(task_id, interval)) + + def ensure_accepted(self, task_id, interval=0.5, timeout=10): + return try_while(lambda: self.is_accepted(task_id, interval), + 'Task not accepted within timeout', + interval=0.5, timeout=10) + + def ensure_received(self, task_id, interval=0.5, timeout=10): + return try_while(lambda: self.is_received(task_id, interval), + 'Task not received within timeout', + interval=0.5, timeout=10) + + def ensure_scheduled(self, task_id, interval=0.5, timeout=10): + return try_while(lambda: self.is_scheduled(task_id, interval), + 'Task not scheduled within timeout', + interval=0.5, timeout=10) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/functional/tasks.py b/thesisenv/lib/python3.6/site-packages/celery/tests/functional/tasks.py new file mode 100644 index 0000000..85479b4 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/functional/tasks.py @@ -0,0 +1,24 @@ +from __future__ import absolute_import + +import time + +from celery import task, signature + + +@task() +def add(x, y): + return x + y + + +@task() +def add_cb(x, y, callback=None): + result = x + y + if callback: + return signature(callback).apply_async(result) + return result + + +@task() +def sleeptask(i): + time.sleep(i) + return i diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/security/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/security/__init__.py new file mode 100644 index 0000000..50b7f4c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/security/__init__.py @@
-0,0 +1,68 @@ +from __future__ import absolute_import +""" +Keys and certificates for tests (KEY1 is a private key of CERT1, etc.) + +Generated with `extra/security/get-cert.sh` + +""" +KEY1 = """-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQC9Twh0V5q/R1Q8N+Y+CNM4lj9AXeZL0gYowoK1ht2ZLCDU9vN5 +dhV0x3sqaXLjQNeCGd6b2vTbFGdF2E45//IWz6/BdPFWaPm0rtYbcxZHqXDZScRp +vFDLHhMysdqQWHxXVxpqIXXo4B7bnfnGvXhYwYITeEyQylV/rnH53mdV8wIDAQAB +AoGBAKUJN4elr+S9nHP7D6BZNTsJ0Q6eTd0ftfrmx+jVMG8Oh3jh6ZSkG0R5e6iX +0W7I4pgrUWRyWDB98yJy1o+90CAN/D80o8SbmW/zfA2WLBteOujMfCEjNrc/Nodf +6MZ0QQ6PnPH6pp94i3kNmFD8Mlzm+ODrUjPF0dCNf474qeKhAkEA7SXj5cQPyQXM +s15oGX5eb6VOk96eAPtEC72cLSh6o+VYmXyGroV1A2JPm6IzH87mTqjWXG229hjt +XVvDbdY2uQJBAMxblWFaWJhhU6Y1euazaBl/OyLYlqNz4LZ0RzCulEoV/gMGYU32 +PbilD5fpFsyhp5oCxnWNEsUFovYMKjKM3AsCQQCIlOcBoP76ZxWzRK8t56MaKBnu +fiuAIzbYkDbPp12i4Wc61wZ2ozR2Y3u4Bh3tturb6M+04hea+1ZSC5StwM85AkAp +UPLYpe13kWXaGsHoVqlbTk/kcamzDkCGYufpvcIZYGzkq6uMmZZM+II4klWbtasv +BhSdu5Hp54PU/wyg/72VAkBy1/oM3/QJ35Vb6TByHBLFR4nOuORoRclmxcoCPva9 +xqkQQn+UgBtOemRXpFCuKaoXonA3nLeB54SWcC6YUOcR +-----END RSA PRIVATE KEY-----""" + +KEY2 = """-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQDH22L8b9AmST9ABDmQTQ2DWMdDmK5YXZt4AIY81IcsTQ/ccM0C +fwXEP9tdkYwtcxMCWdASwY5pfMy9vFp0hyrRQMSNfuoxAgONuNWPyQoIvY3ZXRe6 +rS+hb/LN4+vdjX+oxmYiQ2HmSB9rh2bepE6Cw+RLJr5sXXq+xZJ+BLt5tQIDAQAB +AoGBAMGBO0Arip/nP6Rd8tYypKjN5nEefX/1cjgoWdC//fj4zCil1vlZv12abm0U +JWNEDd2y0/G1Eow0V5BFtFcrIFowU44LZEiSf7sKXlNHRHlbZmDgNXFZOt7nVbHn +6SN+oCYjaPjji8idYeb3VQXPtqMoMn73MuyxD3k3tWmVLonpAkEA6hsu62qhUk5k +Nt88UZOauU1YizxsWvT0bHioaceE4TEsbO3NZs7dmdJIcRFcU787lANaaIq7Rw26 +qcumME9XhwJBANqMOzsYQ6BX54UzS6x99Jjlq9MEbTCbAEZr/yjopb9f617SwfuE +AEKnIq3HL6/Tnhv3V8Zy3wYHgDoGNeTVe+MCQQDi/nyeNAQ8RFqTgh2Ak/jAmCi0 +yV/fSgj+bHgQKS/FEuMas/IoL4lbrzQivkyhv5lLSX0ORQaWPM+z+A0qZqRdAkBh +XE+Wx/x4ljCh+nQf6AzrgIXHgBVUrfi1Zq9Jfjs4wnaMy793WRr0lpiwaigoYFHz +i4Ei+1G30eeh8dpYk3KZAkB0ucTOsQynDlL5rLGYZ+IcfSfH3w2l5EszY47kKQG9 +Fxeq/HOp9JYw4gRu6Ycvqu57KHwpHhR0FCXRBxuYcJ5V +-----END RSA PRIVATE KEY-----""" + +CERT1 = """-----BEGIN CERTIFICATE----- +MIICVzCCAcACCQC72PP7b7H9BTANBgkqhkiG9w0BAQUFADBwMQswCQYDVQQGEwJV +UzELMAkGA1UECBMCQ0ExCzAJBgNVBAcTAlNGMQ8wDQYDVQQKEwZDZWxlcnkxDzAN +BgNVBAMTBkNFbGVyeTElMCMGCSqGSIb3DQEJARYWY2VydEBjZWxlcnlwcm9qZWN0 +Lm9yZzAeFw0xMzA3MjQxMjExMTRaFw0xNDA3MjQxMjExMTRaMHAxCzAJBgNVBAYT +AlVTMQswCQYDVQQIEwJDQTELMAkGA1UEBxMCU0YxDzANBgNVBAoTBkNlbGVyeTEP +MA0GA1UEAxMGQ0VsZXJ5MSUwIwYJKoZIhvcNAQkBFhZjZXJ0QGNlbGVyeXByb2pl +Y3Qub3JnMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQC9Twh0V5q/R1Q8N+Y+ +CNM4lj9AXeZL0gYowoK1ht2ZLCDU9vN5dhV0x3sqaXLjQNeCGd6b2vTbFGdF2E45 +//IWz6/BdPFWaPm0rtYbcxZHqXDZScRpvFDLHhMysdqQWHxXVxpqIXXo4B7bnfnG +vXhYwYITeEyQylV/rnH53mdV8wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAKA4tD3J +94tsnQxFxHP7Frt7IvGMH+3wMqOiXFgYxPJX2tyaPvOLJ/7ERE4MkrvZO7IRC0iA +yKBe0pucdrTgsJoDV8juahuyjXOjvU14+q7Wv7pj7zqddVavzK8STLX4/FMIDnbK +aMGJl7wyj6V2yy6ANSbmy0uQjHikI6DrZEoK +-----END CERTIFICATE-----""" + +CERT2 = """-----BEGIN CERTIFICATE----- +MIICATCCAWoCCQCV/9A2ZBM37TANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJB +VTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0 +cyBQdHkgTHRkMB4XDTExMDcxOTA5MDkwMloXDTEyMDcxODA5MDkwMlowRTELMAkG +A1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0 +IFdpZGdpdHMgUHR5IEx0ZDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAx9ti +/G/QJkk/QAQ5kE0Ng1jHQ5iuWF2beACGPNSHLE0P3HDNAn8FxD/bXZGMLXMTAlnQ +EsGOaXzMvbxadIcq0UDEjX7qMQIDjbjVj8kKCL2N2V0Xuq0voW/yzePr3Y1/qMZm +IkNh5kgfa4dm3qROgsPkSya+bF16vsWSfgS7ebUCAwEAATANBgkqhkiG9w0BAQUF +AAOBgQBzaZ5vBkzksPhnWb2oobuy6Ne/LMEtdQ//qeVY4sKl2tOJUCSdWRen9fqP 
+e+zYdEdkFCd8rp568Eiwkq/553uy4rlE927/AEqs/+KGYmAtibk/9vmi+/+iZXyS +WWZybzzDZFncq1/N1C3Y/hrCBNDFO4TsnTLAhWtZ4c0vDAiacw== +-----END CERTIFICATE-----""" diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/security/case.py b/thesisenv/lib/python3.6/site-packages/celery/tests/security/case.py new file mode 100644 index 0000000..ba421a9 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/security/case.py @@ -0,0 +1,16 @@ +from __future__ import absolute_import + +from celery.tests.case import AppCase, SkipTest + +import sys + + +class SecurityCase(AppCase): + + def setup(self): + if sys.version_info[0] == 3: + raise SkipTest('PyOpenSSL does not work on Python 3') + try: + from OpenSSL import crypto # noqa + except ImportError: + raise SkipTest('OpenSSL.crypto not installed') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_certificate.py b/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_certificate.py new file mode 100644 index 0000000..6e153bd --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_certificate.py @@ -0,0 +1,78 @@ +from __future__ import absolute_import + +from celery.exceptions import SecurityError +from celery.security.certificate import Certificate, CertStore, FSCertStore + +from . import CERT1, CERT2, KEY1 +from .case import SecurityCase + +from celery.tests.case import Mock, SkipTest, mock_open, patch + + +class test_Certificate(SecurityCase): + + def test_valid_certificate(self): + Certificate(CERT1) + Certificate(CERT2) + + def test_invalid_certificate(self): + self.assertRaises((SecurityError, TypeError), Certificate, None) + self.assertRaises(SecurityError, Certificate, '') + self.assertRaises(SecurityError, Certificate, 'foo') + self.assertRaises(SecurityError, Certificate, CERT1[:20] + CERT1[21:]) + self.assertRaises(SecurityError, Certificate, KEY1) + + def test_has_expired(self): + raise SkipTest('cert actually expired') + self.assertFalse(Certificate(CERT1).has_expired()) + + +class test_CertStore(SecurityCase): + + def test_itercerts(self): + cert1 = Certificate(CERT1) + cert2 = Certificate(CERT2) + certstore = CertStore() + for c in certstore.itercerts(): + self.assertTrue(False) + certstore.add_cert(cert1) + certstore.add_cert(cert2) + for c in certstore.itercerts(): + self.assertIn(c, (cert1, cert2)) + + def test_duplicate(self): + cert1 = Certificate(CERT1) + certstore = CertStore() + certstore.add_cert(cert1) + self.assertRaises(SecurityError, certstore.add_cert, cert1) + + +class test_FSCertStore(SecurityCase): + + @patch('os.path.isdir') + @patch('glob.glob') + @patch('celery.security.certificate.Certificate') + def test_init(self, Certificate, glob, isdir): + cert = Certificate.return_value = Mock() + cert.has_expired.return_value = False + isdir.return_value = True + glob.return_value = ['foo.cert'] + with mock_open(): + cert.get_id.return_value = 1 + x = FSCertStore('/var/certs') + self.assertIn(1, x._certs) + glob.assert_called_with('/var/certs/*') + + # they both end up with the same id + glob.return_value = ['foo.cert', 'bar.cert'] + with self.assertRaises(SecurityError): + x = FSCertStore('/var/certs') + glob.return_value = ['foo.cert'] + + cert.has_expired.return_value = True + with self.assertRaises(SecurityError): + x = FSCertStore('/var/certs') + + isdir.return_value = False + with self.assertRaises(SecurityError): + x = FSCertStore('/var/certs') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_key.py 
b/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_key.py new file mode 100644 index 0000000..d8551b2 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_key.py @@ -0,0 +1,26 @@ +from __future__ import absolute_import + +from celery.exceptions import SecurityError +from celery.security.key import PrivateKey + +from . import CERT1, KEY1, KEY2 +from .case import SecurityCase + + +class test_PrivateKey(SecurityCase): + + def test_valid_private_key(self): + PrivateKey(KEY1) + PrivateKey(KEY2) + + def test_invalid_private_key(self): + self.assertRaises((SecurityError, TypeError), PrivateKey, None) + self.assertRaises(SecurityError, PrivateKey, '') + self.assertRaises(SecurityError, PrivateKey, 'foo') + self.assertRaises(SecurityError, PrivateKey, KEY1[:20] + KEY1[21:]) + self.assertRaises(SecurityError, PrivateKey, CERT1) + + def test_sign(self): + pkey = PrivateKey(KEY1) + pkey.sign('test', 'sha1') + self.assertRaises(ValueError, pkey.sign, 'test', 'unknown') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_security.py b/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_security.py new file mode 100644 index 0000000..227c65a --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_security.py @@ -0,0 +1,110 @@ +""" +Keys and certificates for tests (KEY1 is a private key of CERT1, etc.) + +Generated with: + +.. code-block:: bash + + $ openssl genrsa -des3 -passout pass:test -out key1.key 1024 + $ openssl req -new -key key1.key -out key1.csr -passin pass:test + $ cp key1.key key1.key.org + $ openssl rsa -in key1.key.org -out key1.key -passin pass:test + $ openssl x509 -req -days 365 -in cert1.csr \ + -signkey key1.key -out cert1.crt + $ rm key1.key.org cert1.csr + +""" +from __future__ import absolute_import + +from kombu.serialization import disable_insecure_serializers + +from celery.exceptions import ImproperlyConfigured, SecurityError +from celery.five import builtins +from celery.security.utils import reraise_errors +from kombu.serialization import registry + +from .case import SecurityCase + +from celery.tests.case import Mock, mock_open, patch + + +class test_security(SecurityCase): + + def teardown(self): + registry._disabled_content_types.clear() + + def test_disable_insecure_serializers(self): + try: + disabled = registry._disabled_content_types + self.assertTrue(disabled) + + disable_insecure_serializers( + ['application/json', 'application/x-python-serialize'], + ) + self.assertIn('application/x-yaml', disabled) + self.assertNotIn('application/json', disabled) + self.assertNotIn('application/x-python-serialize', disabled) + disabled.clear() + + disable_insecure_serializers(allowed=None) + self.assertIn('application/x-yaml', disabled) + self.assertIn('application/json', disabled) + self.assertIn('application/x-python-serialize', disabled) + finally: + disable_insecure_serializers(allowed=['json']) + + def test_setup_security(self): + disabled = registry._disabled_content_types + self.assertEqual(0, len(disabled)) + + self.app.conf.CELERY_TASK_SERIALIZER = 'json' + self.app.setup_security() + self.assertIn('application/x-python-serialize', disabled) + disabled.clear() + + @patch('celery.security.register_auth') + @patch('celery.security._disable_insecure_serializers') + def test_setup_registry_complete(self, dis, reg, key='KEY', cert='CERT'): + calls = [0] + + def effect(*args): + try: + m = Mock() + m.read.return_value = 'B' if calls[0] else 'A' + return m + 
finally: + calls[0] += 1 + + self.app.conf.CELERY_TASK_SERIALIZER = 'auth' + with mock_open(side_effect=effect): + with patch('celery.security.registry') as registry: + store = Mock() + self.app.setup_security(['json'], key, cert, store) + dis.assert_called_with(['json']) + reg.assert_called_with('A', 'B', store, 'sha1', 'json') + registry._set_default_serializer.assert_called_with('auth') + + def test_security_conf(self): + self.app.conf.CELERY_TASK_SERIALIZER = 'auth' + with self.assertRaises(ImproperlyConfigured): + self.app.setup_security() + + _import = builtins.__import__ + + def import_hook(name, *args, **kwargs): + if name == 'OpenSSL': + raise ImportError + return _import(name, *args, **kwargs) + + builtins.__import__ = import_hook + with self.assertRaises(ImproperlyConfigured): + self.app.setup_security() + builtins.__import__ = _import + + def test_reraise_errors(self): + with self.assertRaises(SecurityError): + with reraise_errors(errors=(KeyError, )): + raise KeyError('foo') + with self.assertRaises(KeyError): + with reraise_errors(errors=(ValueError, )): + raise KeyError('bar') diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_serialization.py b/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_serialization.py new file mode 100644 index 0000000..50bc4bf --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/security/test_serialization.py @@ -0,0 +1,64 @@ +from __future__ import absolute_import + +import os +import base64 + +from kombu.serialization import registry + +from celery.exceptions import SecurityError +from celery.security.serialization import SecureSerializer, register_auth +from celery.security.certificate import Certificate, CertStore +from celery.security.key import PrivateKey + +from . 
import CERT1, CERT2, KEY1, KEY2 +from .case import SecurityCase + + +class test_SecureSerializer(SecurityCase): + + def _get_s(self, key, cert, certs): + store = CertStore() + for c in certs: + store.add_cert(Certificate(c)) + return SecureSerializer(PrivateKey(key), Certificate(cert), store) + + def test_serialize(self): + s = self._get_s(KEY1, CERT1, [CERT1]) + self.assertEqual(s.deserialize(s.serialize('foo')), 'foo') + + def test_deserialize(self): + s = self._get_s(KEY1, CERT1, [CERT1]) + self.assertRaises(SecurityError, s.deserialize, 'bad data') + + def test_unmatched_key_cert(self): + s = self._get_s(KEY1, CERT2, [CERT1, CERT2]) + self.assertRaises(SecurityError, + s.deserialize, s.serialize('foo')) + + def test_unknown_source(self): + s1 = self._get_s(KEY1, CERT1, [CERT2]) + s2 = self._get_s(KEY1, CERT1, []) + self.assertRaises(SecurityError, + s1.deserialize, s1.serialize('foo')) + self.assertRaises(SecurityError, + s2.deserialize, s2.serialize('foo')) + + def test_self_send(self): + s1 = self._get_s(KEY1, CERT1, [CERT1]) + s2 = self._get_s(KEY1, CERT1, [CERT1]) + self.assertEqual(s2.deserialize(s1.serialize('foo')), 'foo') + + def test_separate_ends(self): + s1 = self._get_s(KEY1, CERT1, [CERT2]) + s2 = self._get_s(KEY2, CERT2, [CERT1]) + self.assertEqual(s2.deserialize(s1.serialize('foo')), 'foo') + + def test_register_auth(self): + register_auth(KEY1, CERT1, '') + self.assertIn('application/data', registry._decoders) + + def test_lots_of_sign(self): + for i in range(1000): + rdata = base64.urlsafe_b64encode(os.urandom(265)) + s = self._get_s(KEY1, CERT1, [CERT1]) + self.assertEqual(s.deserialize(s.serialize(rdata)), rdata) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/slow/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/slow/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_canvas.py b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_canvas.py new file mode 100644 index 0000000..2508025 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_canvas.py @@ -0,0 +1,346 @@ +from __future__ import absolute_import + +from celery.canvas import ( + Signature, + chain, + group, + chord, + signature, + xmap, + xstarmap, + chunks, + _maybe_group, + maybe_signature, +) +from celery.result import EagerResult + +from celery.tests.case import AppCase, Mock + +SIG = Signature({'task': 'TASK', + 'args': ('A1', ), + 'kwargs': {'K1': 'V1'}, + 'options': {'task_id': 'TASK_ID'}, + 'subtask_type': ''}) + + +class CanvasCase(AppCase): + + def setup(self): + + @self.app.task(shared=False) + def add(x, y): + return x + y + self.add = add + + @self.app.task(shared=False) + def mul(x, y): + return x * y + self.mul = mul + + @self.app.task(shared=False) + def div(x, y): + return x / y + self.div = div + + +class test_Signature(CanvasCase): + + def test_getitem_property_class(self): + self.assertTrue(Signature.task) + self.assertTrue(Signature.args) + self.assertTrue(Signature.kwargs) + self.assertTrue(Signature.options) + self.assertTrue(Signature.subtask_type) + + def test_getitem_property(self): + self.assertEqual(SIG.task, 'TASK') + self.assertEqual(SIG.args, ('A1', )) + self.assertEqual(SIG.kwargs, {'K1': 'V1'}) + self.assertEqual(SIG.options, {'task_id': 
'TASK_ID'}) + self.assertEqual(SIG.subtask_type, '') + + def test_link_on_scalar(self): + x = Signature('TASK', link=Signature('B')) + self.assertTrue(x.options['link']) + x.link(Signature('C')) + self.assertIsInstance(x.options['link'], list) + self.assertIn(Signature('B'), x.options['link']) + self.assertIn(Signature('C'), x.options['link']) + + def test_replace(self): + x = Signature('TASK', ('A'), {}) + self.assertTupleEqual(x.replace(args=('B', )).args, ('B', )) + self.assertDictEqual( + x.replace(kwargs={'FOO': 'BAR'}).kwargs, + {'FOO': 'BAR'}, + ) + self.assertDictEqual( + x.replace(options={'task_id': '123'}).options, + {'task_id': '123'}, + ) + + def test_set(self): + self.assertDictEqual( + Signature('TASK', x=1).set(task_id='2').options, + {'x': 1, 'task_id': '2'}, + ) + + def test_link(self): + x = signature(SIG) + x.link(SIG) + x.link(SIG) + self.assertIn(SIG, x.options['link']) + self.assertEqual(len(x.options['link']), 1) + + def test_link_error(self): + x = signature(SIG) + x.link_error(SIG) + x.link_error(SIG) + self.assertIn(SIG, x.options['link_error']) + self.assertEqual(len(x.options['link_error']), 1) + + def test_flatten_links(self): + tasks = [self.add.s(2, 2), self.mul.s(4), self.div.s(2)] + tasks[0].link(tasks[1]) + tasks[1].link(tasks[2]) + self.assertEqual(tasks[0].flatten_links(), tasks) + + def test_OR(self): + x = self.add.s(2, 2) | self.mul.s(4) + self.assertIsInstance(x, chain) + y = self.add.s(4, 4) | self.div.s(2) + z = x | y + self.assertIsInstance(y, chain) + self.assertIsInstance(z, chain) + self.assertEqual(len(z.tasks), 4) + with self.assertRaises(TypeError): + x | 10 + ax = self.add.s(2, 2) | (self.add.s(4) | self.add.s(8)) + self.assertIsInstance(ax, chain) + self.assertEqual(len(ax.tasks), 3, 'consolidates chain to chain') + + def test_INVERT(self): + x = self.add.s(2, 2) + x.apply_async = Mock() + x.apply_async.return_value = Mock() + x.apply_async.return_value.get = Mock() + x.apply_async.return_value.get.return_value = 4 + self.assertEqual(~x, 4) + self.assertTrue(x.apply_async.called) + + def test_merge_immutable(self): + x = self.add.si(2, 2, foo=1) + args, kwargs, options = x._merge((4, ), {'bar': 2}, {'task_id': 3}) + self.assertTupleEqual(args, (2, 2)) + self.assertDictEqual(kwargs, {'foo': 1}) + self.assertDictEqual(options, {'task_id': 3}) + + def test_set_immutable(self): + x = self.add.s(2, 2) + self.assertFalse(x.immutable) + x.set(immutable=True) + self.assertTrue(x.immutable) + x.set(immutable=False) + self.assertFalse(x.immutable) + + def test_election(self): + x = self.add.s(2, 2) + x.freeze('foo') + x.type.app.control = Mock() + r = x.election() + self.assertTrue(x.type.app.control.election.called) + self.assertEqual(r.id, 'foo') + + def test_AsyncResult_when_not_registered(self): + s = signature('xxx.not.registered', app=self.app) + self.assertTrue(s.AsyncResult) + + def test_apply_async_when_not_registered(self): + s = signature('xxx.not.registered', app=self.app) + self.assertTrue(s._apply_async) + + +class test_xmap_xstarmap(CanvasCase): + + def test_apply(self): + for type, attr in [(xmap, 'map'), (xstarmap, 'starmap')]: + args = [(i, i) for i in range(10)] + s = getattr(self.add, attr)(args) + s.type = Mock() + + s.apply_async(foo=1) + s.type.apply_async.assert_called_with( + (), {'task': self.add.s(), 'it': args}, foo=1, + ) + + self.assertEqual(type.from_dict(dict(s)), s) + self.assertTrue(repr(s)) + + +class test_chunks(CanvasCase): + + def test_chunks(self): + x = self.add.chunks(range(100), 10) + self.assertEqual( + 
dict(chunks.from_dict(dict(x), app=self.app)), dict(x), + ) + + self.assertTrue(x.group()) + self.assertEqual(len(x.group().tasks), 10) + + x.group = Mock() + gr = x.group.return_value = Mock() + + x.apply_async() + gr.apply_async.assert_called_with((), {}) + + x() + gr.assert_called_with() + + self.app.conf.CELERY_ALWAYS_EAGER = True + chunks.apply_chunks(app=self.app, **x['kwargs']) + + +class test_chain(CanvasCase): + + def test_repr(self): + x = self.add.s(2, 2) | self.add.s(2) + self.assertEqual( + repr(x), '%s(2, 2) | %s(2)' % (self.add.name, self.add.name), + ) + + def test_reverse(self): + x = self.add.s(2, 2) | self.add.s(2) + self.assertIsInstance(signature(x), chain) + self.assertIsInstance(signature(dict(x)), chain) + + def test_always_eager(self): + self.app.conf.CELERY_ALWAYS_EAGER = True + self.assertEqual(~(self.add.s(4, 4) | self.add.s(8)), 16) + + def test_apply(self): + x = chain(self.add.s(4, 4), self.add.s(8), self.add.s(10)) + res = x.apply() + self.assertIsInstance(res, EagerResult) + self.assertEqual(res.get(), 26) + + self.assertEqual(res.parent.get(), 16) + self.assertEqual(res.parent.parent.get(), 8) + self.assertIsNone(res.parent.parent.parent) + + def test_empty_chain_returns_none(self): + self.assertIsNone(chain(app=self.app)()) + self.assertIsNone(chain(app=self.app).apply_async()) + + def test_call_no_tasks(self): + x = chain() + self.assertFalse(x()) + + def test_call_with_tasks(self): + x = self.add.s(2, 2) | self.add.s(4) + x.apply_async = Mock() + x(2, 2, foo=1) + x.apply_async.assert_called_with((2, 2), {'foo': 1}) + + def test_from_dict_no_args__with_args(self): + x = dict(self.add.s(2, 2) | self.add.s(4)) + x['args'] = None + self.assertIsInstance(chain.from_dict(x), chain) + x['args'] = (2, ) + self.assertIsInstance(chain.from_dict(x), chain) + + def test_accepts_generator_argument(self): + x = chain(self.add.s(i) for i in range(10)) + self.assertTrue(x.tasks[0].type, self.add) + self.assertTrue(x.type) + + +class test_group(CanvasCase): + + def test_repr(self): + x = group([self.add.s(2, 2), self.add.s(4, 4)]) + self.assertEqual(repr(x), repr(x.tasks)) + + def test_reverse(self): + x = group([self.add.s(2, 2), self.add.s(4, 4)]) + self.assertIsInstance(signature(x), group) + self.assertIsInstance(signature(dict(x)), group) + + def test_maybe_group_sig(self): + self.assertListEqual( + _maybe_group(self.add.s(2, 2)), [self.add.s(2, 2)], + ) + + def test_from_dict(self): + x = group([self.add.s(2, 2), self.add.s(4, 4)]) + x['args'] = (2, 2) + self.assertTrue(group.from_dict(dict(x))) + x['args'] = None + self.assertTrue(group.from_dict(dict(x))) + + def test_call_empty_group(self): + x = group(app=self.app) + self.assertFalse(len(x())) + x.delay() + x.apply_async() + x() + + def test_skew(self): + g = group([self.add.s(i, i) for i in range(10)]) + g.skew(start=1, stop=10, step=1) + for i, task in enumerate(g.tasks): + self.assertEqual(task.options['countdown'], i + 1) + + def test_iter(self): + g = group([self.add.s(i, i) for i in range(10)]) + self.assertListEqual(list(iter(g)), g.tasks) + + +class test_chord(CanvasCase): + + def test_reverse(self): + x = chord([self.add.s(2, 2), self.add.s(4, 4)], body=self.mul.s(4)) + self.assertIsInstance(signature(x), chord) + self.assertIsInstance(signature(dict(x)), chord) + + def test_clone_clones_body(self): + x = chord([self.add.s(2, 2), self.add.s(4, 4)], body=self.mul.s(4)) + y = x.clone() + self.assertIsNot(x.kwargs['body'], y.kwargs['body']) + y.kwargs.pop('body') + z = y.clone() + 
self.assertIsNone(z.kwargs.get('body')) + + def test_links_to_body(self): + x = chord([self.add.s(2, 2), self.add.s(4, 4)], body=self.mul.s(4)) + x.link(self.div.s(2)) + self.assertFalse(x.options.get('link')) + self.assertTrue(x.kwargs['body'].options['link']) + + x.link_error(self.div.s(2)) + self.assertFalse(x.options.get('link_error')) + self.assertTrue(x.kwargs['body'].options['link_error']) + + self.assertTrue(x.tasks) + self.assertTrue(x.body) + + def test_repr(self): + x = chord([self.add.s(2, 2), self.add.s(4, 4)], body=self.mul.s(4)) + self.assertTrue(repr(x)) + x.kwargs['body'] = None + self.assertIn('without body', repr(x)) + + +class test_maybe_signature(CanvasCase): + + def test_is_None(self): + self.assertIsNone(maybe_signature(None, app=self.app)) + + def test_is_dict(self): + self.assertIsInstance( + maybe_signature(dict(self.add.s()), app=self.app), Signature, + ) + + def test_when_sig(self): + s = self.add.s() + self.assertIs(maybe_signature(s, app=self.app), s) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_chord.py b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_chord.py new file mode 100644 index 0000000..dcc3304 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_chord.py @@ -0,0 +1,235 @@ +from __future__ import absolute_import + +from contextlib import contextmanager + +from celery import group +from celery import canvas +from celery import result +from celery.exceptions import ChordError, Retry +from celery.five import range +from celery.result import AsyncResult, GroupResult, EagerResult +from celery.tests.case import AppCase, Mock + + +def passthru(x): + return x + + +class ChordCase(AppCase): + + def setup(self): + + @self.app.task(shared=False) + def add(x, y): + return x + y + self.add = add + + +class TSR(GroupResult): + is_ready = True + value = None + + def ready(self): + return self.is_ready + + def join(self, propagate=True, **kwargs): + if propagate: + for value in self.value: + if isinstance(value, Exception): + raise value + return self.value + join_native = join + + def _failed_join_report(self): + for value in self.value: + if isinstance(value, Exception): + yield EagerResult('some_id', value, 'FAILURE') + + +class TSRNoReport(TSR): + + def _failed_join_report(self): + return iter([]) + + +@contextmanager +def patch_unlock_retry(app): + unlock = app.tasks['celery.chord_unlock'] + retry = Mock() + retry.return_value = Retry() + prev, unlock.retry = unlock.retry, retry + try: + yield unlock, retry + finally: + unlock.retry = prev + + +class test_unlock_chord_task(ChordCase): + + def test_unlock_ready(self): + + class AlwaysReady(TSR): + is_ready = True + value = [2, 4, 8, 6] + + with self._chord_context(AlwaysReady) as (cb, retry, _): + cb.type.apply_async.assert_called_with( + ([2, 4, 8, 6], ), {}, task_id=cb.id, + ) + # did not retry + self.assertFalse(retry.call_count) + + def test_callback_fails(self): + + class AlwaysReady(TSR): + is_ready = True + value = [2, 4, 8, 6] + + def setup(callback): + callback.apply_async.side_effect = IOError() + + with self._chord_context(AlwaysReady, setup) as (cb, retry, fail): + self.assertTrue(fail.called) + self.assertEqual( + fail.call_args[0][0], cb.id, + ) + self.assertIsInstance( + fail.call_args[1]['exc'], ChordError, + ) + + def test_unlock_ready_failed(self): + + class Failed(TSR): + is_ready = True + value = [2, KeyError('foo'), 8, 6] + + with self._chord_context(Failed) as (cb, retry, fail_current): + 
self.assertFalse(cb.type.apply_async.called) + # did not retry + self.assertFalse(retry.call_count) + self.assertTrue(fail_current.called) + self.assertEqual( + fail_current.call_args[0][0], cb.id, + ) + self.assertIsInstance( + fail_current.call_args[1]['exc'], ChordError, + ) + self.assertIn('some_id', str(fail_current.call_args[1]['exc'])) + + def test_unlock_ready_failed_no_culprit(self): + class Failed(TSRNoReport): + is_ready = True + value = [2, KeyError('foo'), 8, 6] + + with self._chord_context(Failed) as (cb, retry, fail_current): + self.assertTrue(fail_current.called) + self.assertEqual( + fail_current.call_args[0][0], cb.id, + ) + self.assertIsInstance( + fail_current.call_args[1]['exc'], ChordError, + ) + + @contextmanager + def _chord_context(self, ResultCls, setup=None, **kwargs): + @self.app.task(shared=False) + def callback(*args, **kwargs): + pass + self.app.finalize() + + pts, result.GroupResult = result.GroupResult, ResultCls + callback.apply_async = Mock() + callback_s = callback.s() + callback_s.id = 'callback_id' + fail_current = self.app.backend.fail_from_current_stack = Mock() + try: + with patch_unlock_retry(self.app) as (unlock, retry): + subtask, canvas.maybe_signature = ( + canvas.maybe_signature, passthru, + ) + if setup: + setup(callback) + try: + assert self.app.tasks['celery.chord_unlock'] is unlock + try: + unlock( + 'group_id', callback_s, + result=[ + self.app.AsyncResult(r) for r in ['1', 2, 3] + ], + GroupResult=ResultCls, **kwargs + ) + except Retry: + pass + finally: + canvas.maybe_signature = subtask + yield callback_s, retry, fail_current + finally: + result.GroupResult = pts + + def test_when_not_ready(self): + class NeverReady(TSR): + is_ready = False + + with self._chord_context(NeverReady, interval=10, max_retries=30) \ + as (cb, retry, _): + self.assertFalse(cb.type.apply_async.called) + # did retry + retry.assert_called_with(countdown=10, max_retries=30) + + def test_is_in_registry(self): + self.assertIn('celery.chord_unlock', self.app.tasks) + + +class test_chord(ChordCase): + + def test_eager(self): + from celery import chord + + @self.app.task(shared=False) + def addX(x, y): + return x + y + + @self.app.task(shared=False) + def sumX(n): + return sum(n) + + self.app.conf.CELERY_ALWAYS_EAGER = True + x = chord(addX.s(i, i) for i in range(10)) + body = sumX.s() + result = x(body) + self.assertEqual(result.get(), sum(i + i for i in range(10))) + + def test_apply(self): + self.app.conf.CELERY_ALWAYS_EAGER = False + from celery import chord + + m = Mock() + m.app.conf.CELERY_ALWAYS_EAGER = False + m.AsyncResult = AsyncResult + prev, chord._type = chord._type, m + try: + x = chord(self.add.s(i, i) for i in range(10)) + body = self.add.s(2) + result = x(body) + self.assertTrue(result.id) + # does not modify original subtask + with self.assertRaises(KeyError): + body.options['task_id'] + self.assertTrue(chord._type.called) + finally: + chord._type = prev + + +class test_Chord_task(ChordCase): + + def test_run(self): + self.app.backend = Mock() + self.app.backend.cleanup = Mock() + self.app.backend.cleanup.__name__ = 'cleanup' + Chord = self.app.tasks['celery.chord'] + + body = dict() + Chord(group(self.add.subtask((i, i)) for i in range(5)), body) + Chord([self.add.subtask((j, j)) for j in range(5)], body) + self.assertEqual(self.app.backend.apply_chord.call_count, 2) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_context.py b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_context.py new file mode 100644 
index 0000000..ecad3f8 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_context.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*-' +from __future__ import absolute_import + +from celery.app.task import Context +from celery.tests.case import AppCase + + +# Retreive the values of all context attributes as a +# dictionary in an implementation-agnostic manner. +def get_context_as_dict(ctx, getter=getattr): + defaults = {} + for attr_name in dir(ctx): + if attr_name.startswith('_'): + continue # Ignore pseudo-private attributes + attr = getter(ctx, attr_name) + if callable(attr): + continue # Ignore methods and other non-trivial types + defaults[attr_name] = attr + return defaults +default_context = get_context_as_dict(Context()) + + +class test_Context(AppCase): + + def test_default_context(self): + # A bit of a tautological test, since it uses the same + # initializer as the default_context constructor. + defaults = dict(default_context, children=[]) + self.assertDictEqual(get_context_as_dict(Context()), defaults) + + def test_updated_context(self): + expected = dict(default_context) + changes = dict(id='unique id', args=['some', 1], wibble='wobble') + ctx = Context() + expected.update(changes) + ctx.update(changes) + self.assertDictEqual(get_context_as_dict(ctx), expected) + self.assertDictEqual(get_context_as_dict(Context()), default_context) + + def test_modified_context(self): + expected = dict(default_context) + ctx = Context() + expected['id'] = 'unique id' + expected['args'] = ['some', 1] + ctx.id = 'unique id' + ctx.args = ['some', 1] + self.assertDictEqual(get_context_as_dict(ctx), expected) + self.assertDictEqual(get_context_as_dict(Context()), default_context) + + def test_cleared_context(self): + changes = dict(id='unique id', args=['some', 1], wibble='wobble') + ctx = Context() + ctx.update(changes) + ctx.clear() + defaults = dict(default_context, children=[]) + self.assertDictEqual(get_context_as_dict(ctx), defaults) + self.assertDictEqual(get_context_as_dict(Context()), defaults) + + def test_context_get(self): + expected = dict(default_context) + changes = dict(id='unique id', args=['some', 1], wibble='wobble') + ctx = Context() + expected.update(changes) + ctx.update(changes) + ctx_dict = get_context_as_dict(ctx, getter=Context.get) + self.assertDictEqual(ctx_dict, expected) + self.assertDictEqual(get_context_as_dict(Context()), default_context) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_result.py b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_result.py new file mode 100644 index 0000000..50a9e23 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_result.py @@ -0,0 +1,731 @@ +from __future__ import absolute_import + +from contextlib import contextmanager + +from celery import states +from celery.exceptions import IncompleteStream, TimeoutError +from celery.five import range +from celery.result import ( + AsyncResult, + EagerResult, + TaskSetResult, + result_from_tuple, +) +from celery.utils import uuid +from celery.utils.serialization import pickle + +from celery.tests.case import AppCase, Mock, depends_on_current_app, patch + + +def mock_task(name, state, result): + return dict(id=uuid(), name=name, state=state, result=result) + + +def save_result(app, task): + traceback = 'Some traceback' + if task['state'] == states.SUCCESS: + app.backend.mark_as_done(task['id'], task['result']) + elif task['state'] == states.RETRY: + app.backend.mark_as_retry( + task['id'], 
task['result'], traceback=traceback, + ) + else: + app.backend.mark_as_failure( + task['id'], task['result'], traceback=traceback, + ) + + +def make_mock_group(app, size=10): + tasks = [mock_task('ts%d' % i, states.SUCCESS, i) for i in range(size)] + [save_result(app, task) for task in tasks] + return [app.AsyncResult(task['id']) for task in tasks] + + +class test_AsyncResult(AppCase): + + def setup(self): + self.task1 = mock_task('task1', states.SUCCESS, 'the') + self.task2 = mock_task('task2', states.SUCCESS, 'quick') + self.task3 = mock_task('task3', states.FAILURE, KeyError('brown')) + self.task4 = mock_task('task3', states.RETRY, KeyError('red')) + + for task in (self.task1, self.task2, self.task3, self.task4): + save_result(self.app, task) + + @self.app.task(shared=False) + def mytask(): + pass + self.mytask = mytask + + def test_compat_properties(self): + x = self.app.AsyncResult('1') + self.assertEqual(x.task_id, x.id) + x.task_id = '2' + self.assertEqual(x.id, '2') + + def test_children(self): + x = self.app.AsyncResult('1') + children = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)] + x._cache = {'children': children, 'status': states.SUCCESS} + x.backend = Mock() + self.assertTrue(x.children) + self.assertEqual(len(x.children), 3) + + def test_propagates_for_parent(self): + x = self.app.AsyncResult(uuid()) + x.backend = Mock(name='backend') + x.backend.get_task_meta.return_value = {} + x.backend.wait_for.return_value = { + 'status': states.SUCCESS, 'result': 84, + } + x.parent = EagerResult(uuid(), KeyError('foo'), states.FAILURE) + with self.assertRaises(KeyError): + x.get(propagate=True) + self.assertFalse(x.backend.wait_for.called) + + x.parent = EagerResult(uuid(), 42, states.SUCCESS) + self.assertEqual(x.get(propagate=True), 84) + self.assertTrue(x.backend.wait_for.called) + + def test_get_children(self): + tid = uuid() + x = self.app.AsyncResult(tid) + child = [self.app.AsyncResult(uuid()).as_tuple() + for i in range(10)] + x._cache = {'children': child} + self.assertTrue(x.children) + self.assertEqual(len(x.children), 10) + + x._cache = {'status': states.SUCCESS} + x.backend._cache[tid] = {'result': None} + self.assertIsNone(x.children) + + def test_build_graph_get_leaf_collect(self): + x = self.app.AsyncResult('1') + x.backend._cache['1'] = {'status': states.SUCCESS, 'result': None} + c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)] + x.iterdeps = Mock() + x.iterdeps.return_value = ( + (None, x), + (x, c[0]), + (c[0], c[1]), + (c[1], c[2]) + ) + x.backend.READY_STATES = states.READY_STATES + self.assertTrue(x.graph) + + self.assertIs(x.get_leaf(), 2) + + it = x.collect() + self.assertListEqual(list(it), [ + (x, None), + (c[0], 0), + (c[1], 1), + (c[2], 2), + ]) + + def test_iterdeps(self): + x = self.app.AsyncResult('1') + c = [EagerResult(str(i), i, states.SUCCESS) for i in range(3)] + x._cache = {'status': states.SUCCESS, 'result': None, 'children': c} + for child in c: + child.backend = Mock() + child.backend.get_children.return_value = [] + it = x.iterdeps() + self.assertListEqual(list(it), [ + (None, x), + (x, c[0]), + (x, c[1]), + (x, c[2]), + ]) + x._cache = None + x.ready = Mock() + x.ready.return_value = False + with self.assertRaises(IncompleteStream): + list(x.iterdeps()) + list(x.iterdeps(intermediate=True)) + + def test_eq_not_implemented(self): + self.assertFalse(self.app.AsyncResult('1') == object()) + + @depends_on_current_app + def test_reduce(self): + a1 = self.app.AsyncResult('uuid', task_name=self.mytask.name) + restored = 
pickle.loads(pickle.dumps(a1)) + self.assertEqual(restored.id, 'uuid') + self.assertEqual(restored.task_name, self.mytask.name) + + a2 = self.app.AsyncResult('uuid') + self.assertEqual(pickle.loads(pickle.dumps(a2)).id, 'uuid') + + def test_successful(self): + ok_res = self.app.AsyncResult(self.task1['id']) + nok_res = self.app.AsyncResult(self.task3['id']) + nok_res2 = self.app.AsyncResult(self.task4['id']) + + self.assertTrue(ok_res.successful()) + self.assertFalse(nok_res.successful()) + self.assertFalse(nok_res2.successful()) + + pending_res = self.app.AsyncResult(uuid()) + self.assertFalse(pending_res.successful()) + + def test_str(self): + ok_res = self.app.AsyncResult(self.task1['id']) + ok2_res = self.app.AsyncResult(self.task2['id']) + nok_res = self.app.AsyncResult(self.task3['id']) + self.assertEqual(str(ok_res), self.task1['id']) + self.assertEqual(str(ok2_res), self.task2['id']) + self.assertEqual(str(nok_res), self.task3['id']) + + pending_id = uuid() + pending_res = self.app.AsyncResult(pending_id) + self.assertEqual(str(pending_res), pending_id) + + def test_repr(self): + ok_res = self.app.AsyncResult(self.task1['id']) + ok2_res = self.app.AsyncResult(self.task2['id']) + nok_res = self.app.AsyncResult(self.task3['id']) + self.assertEqual(repr(ok_res), '' % ( + self.task1['id'])) + self.assertEqual(repr(ok2_res), '' % ( + self.task2['id'])) + self.assertEqual(repr(nok_res), '' % ( + self.task3['id'])) + + pending_id = uuid() + pending_res = self.app.AsyncResult(pending_id) + self.assertEqual(repr(pending_res), '' % ( + pending_id)) + + def test_hash(self): + self.assertEqual(hash(self.app.AsyncResult('x0w991')), + hash(self.app.AsyncResult('x0w991'))) + self.assertNotEqual(hash(self.app.AsyncResult('x0w991')), + hash(self.app.AsyncResult('x1w991'))) + + def test_get_traceback(self): + ok_res = self.app.AsyncResult(self.task1['id']) + nok_res = self.app.AsyncResult(self.task3['id']) + nok_res2 = self.app.AsyncResult(self.task4['id']) + self.assertFalse(ok_res.traceback) + self.assertTrue(nok_res.traceback) + self.assertTrue(nok_res2.traceback) + + pending_res = self.app.AsyncResult(uuid()) + self.assertFalse(pending_res.traceback) + + def test_get(self): + ok_res = self.app.AsyncResult(self.task1['id']) + ok2_res = self.app.AsyncResult(self.task2['id']) + nok_res = self.app.AsyncResult(self.task3['id']) + nok2_res = self.app.AsyncResult(self.task4['id']) + + self.assertEqual(ok_res.get(), 'the') + self.assertEqual(ok2_res.get(), 'quick') + with self.assertRaises(KeyError): + nok_res.get() + self.assertTrue(nok_res.get(propagate=False)) + self.assertIsInstance(nok2_res.result, KeyError) + self.assertEqual(ok_res.info, 'the') + + def test_get_timeout(self): + res = self.app.AsyncResult(self.task4['id']) # has RETRY state + with self.assertRaises(TimeoutError): + res.get(timeout=0.001) + + pending_res = self.app.AsyncResult(uuid()) + with patch('celery.result.time') as _time: + with self.assertRaises(TimeoutError): + pending_res.get(timeout=0.001, interval=0.001) + _time.sleep.assert_called_with(0.001) + + def test_get_timeout_longer(self): + res = self.app.AsyncResult(self.task4['id']) # has RETRY state + with patch('celery.result.time') as _time: + with self.assertRaises(TimeoutError): + res.get(timeout=1, interval=1) + _time.sleep.assert_called_with(1) + + def test_ready(self): + oks = (self.app.AsyncResult(self.task1['id']), + self.app.AsyncResult(self.task2['id']), + self.app.AsyncResult(self.task3['id'])) + self.assertTrue(all(result.ready() for result in oks)) + 
self.assertFalse(self.app.AsyncResult(self.task4['id']).ready()) + + self.assertFalse(self.app.AsyncResult(uuid()).ready()) + + +class test_ResultSet(AppCase): + + def test_resultset_repr(self): + self.assertTrue(repr(self.app.ResultSet( + [self.app.AsyncResult(t) for t in ['1', '2', '3']]))) + + def test_eq_other(self): + self.assertFalse(self.app.ResultSet([1, 3, 3]) == 1) + self.assertTrue(self.app.ResultSet([1]) == self.app.ResultSet([1])) + + def test_get(self): + x = self.app.ResultSet([self.app.AsyncResult(t) for t in [1, 2, 3]]) + b = x.results[0].backend = Mock() + b.supports_native_join = False + x.join_native = Mock() + x.join = Mock() + x.get() + self.assertTrue(x.join.called) + b.supports_native_join = True + x.get() + self.assertTrue(x.join_native.called) + + def test_get_empty(self): + x = self.app.ResultSet([]) + self.assertIsNone(x.supports_native_join) + x.join = Mock(name='join') + x.get() + self.assertTrue(x.join.called) + + def test_add(self): + x = self.app.ResultSet([1]) + x.add(2) + self.assertEqual(len(x), 2) + x.add(2) + self.assertEqual(len(x), 2) + + @contextmanager + def dummy_copy(self): + with patch('celery.result.copy') as copy: + + def passt(arg): + return arg + copy.side_effect = passt + + yield + + def test_iterate_respects_subpolling_interval(self): + r1 = self.app.AsyncResult(uuid()) + r2 = self.app.AsyncResult(uuid()) + backend = r1.backend = r2.backend = Mock() + backend.subpolling_interval = 10 + + ready = r1.ready = r2.ready = Mock() + + def se(*args, **kwargs): + ready.side_effect = KeyError() + return False + ready.return_value = False + ready.side_effect = se + + x = self.app.ResultSet([r1, r2]) + with self.dummy_copy(): + with patch('celery.result.time') as _time: + with self.assertPendingDeprecation(): + with self.assertRaises(KeyError): + list(x.iterate()) + _time.sleep.assert_called_with(10) + + backend.subpolling_interval = 0 + with patch('celery.result.time') as _time: + with self.assertPendingDeprecation(): + with self.assertRaises(KeyError): + ready.return_value = False + ready.side_effect = se + list(x.iterate()) + self.assertFalse(_time.sleep.called) + + def test_times_out(self): + r1 = self.app.AsyncResult(uuid) + r1.ready = Mock() + r1.ready.return_value = False + x = self.app.ResultSet([r1]) + with self.dummy_copy(): + with patch('celery.result.time'): + with self.assertPendingDeprecation(): + with self.assertRaises(TimeoutError): + list(x.iterate(timeout=1)) + + def test_add_discard(self): + x = self.app.ResultSet([]) + x.add(self.app.AsyncResult('1')) + self.assertIn(self.app.AsyncResult('1'), x.results) + x.discard(self.app.AsyncResult('1')) + x.discard(self.app.AsyncResult('1')) + x.discard('1') + self.assertNotIn(self.app.AsyncResult('1'), x.results) + + x.update([self.app.AsyncResult('2')]) + + def test_clear(self): + x = self.app.ResultSet([]) + r = x.results + x.clear() + self.assertIs(x.results, r) + + +class MockAsyncResultFailure(AsyncResult): + + @property + def result(self): + return KeyError('baz') + + @property + def state(self): + return states.FAILURE + + def get(self, propagate=True, **kwargs): + if propagate: + raise self.result + return self.result + + +class MockAsyncResultSuccess(AsyncResult): + forgotten = False + + def forget(self): + self.forgotten = True + + @property + def result(self): + return 42 + + @property + def state(self): + return states.SUCCESS + + def get(self, **kwargs): + return self.result + + +class SimpleBackend(object): + ids = [] + + def __init__(self, ids=[]): + self.ids = ids + + def 
get_many(self, *args, **kwargs): + return ((id, {'result': i, 'status': states.SUCCESS}) + for i, id in enumerate(self.ids)) + + +class test_TaskSetResult(AppCase): + + def setup(self): + self.size = 10 + self.ts = TaskSetResult(uuid(), make_mock_group(self.app, self.size)) + + def test_total(self): + self.assertEqual(self.ts.total, self.size) + + def test_compat_properties(self): + self.assertEqual(self.ts.taskset_id, self.ts.id) + self.ts.taskset_id = 'foo' + self.assertEqual(self.ts.taskset_id, 'foo') + + def test_compat_subtasks_kwarg(self): + x = TaskSetResult(uuid(), subtasks=[1, 2, 3]) + self.assertEqual(x.results, [1, 2, 3]) + + def test_itersubtasks(self): + it = self.ts.itersubtasks() + + for i, t in enumerate(it): + self.assertEqual(t.get(), i) + + +class test_GroupResult(AppCase): + + def setup(self): + self.size = 10 + self.ts = self.app.GroupResult( + uuid(), make_mock_group(self.app, self.size), + ) + + @depends_on_current_app + def test_is_pickleable(self): + ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) + self.assertEqual(pickle.loads(pickle.dumps(ts)), ts) + ts2 = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) + self.assertEqual(pickle.loads(pickle.dumps(ts2)), ts2) + + def test_len(self): + self.assertEqual(len(self.ts), self.size) + + def test_eq_other(self): + self.assertFalse(self.ts == 1) + + @depends_on_current_app + def test_reduce(self): + self.assertTrue(pickle.loads(pickle.dumps(self.ts))) + + def test_iterate_raises(self): + ar = MockAsyncResultFailure(uuid(), app=self.app) + ts = self.app.GroupResult(uuid(), [ar]) + with self.assertPendingDeprecation(): + it = ts.iterate() + with self.assertRaises(KeyError): + next(it) + + def test_forget(self): + subs = [MockAsyncResultSuccess(uuid(), app=self.app), + MockAsyncResultSuccess(uuid(), app=self.app)] + ts = self.app.GroupResult(uuid(), subs) + ts.forget() + for sub in subs: + self.assertTrue(sub.forgotten) + + def test_getitem(self): + subs = [MockAsyncResultSuccess(uuid(), app=self.app), + MockAsyncResultSuccess(uuid(), app=self.app)] + ts = self.app.GroupResult(uuid(), subs) + self.assertIs(ts[0], subs[0]) + + def test_save_restore(self): + subs = [MockAsyncResultSuccess(uuid(), app=self.app), + MockAsyncResultSuccess(uuid(), app=self.app)] + ts = self.app.GroupResult(uuid(), subs) + ts.save() + with self.assertRaises(AttributeError): + ts.save(backend=object()) + self.assertEqual(self.app.GroupResult.restore(ts.id).subtasks, + ts.subtasks) + ts.delete() + self.assertIsNone(self.app.GroupResult.restore(ts.id)) + with self.assertRaises(AttributeError): + self.app.GroupResult.restore(ts.id, backend=object()) + + def test_join_native(self): + backend = SimpleBackend() + subtasks = [self.app.AsyncResult(uuid(), backend=backend) + for i in range(10)] + ts = self.app.GroupResult(uuid(), subtasks) + ts.app.backend = backend + backend.ids = [subtask.id for subtask in subtasks] + res = ts.join_native() + self.assertEqual(res, list(range(10))) + + def test_join_native_raises(self): + ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) + ts.iter_native = Mock() + ts.iter_native.return_value = iter([ + (uuid(), {'status': states.FAILURE, 'result': KeyError()}) + ]) + with self.assertRaises(KeyError): + ts.join_native(propagate=True) + + def test_failed_join_report(self): + res = Mock() + ts = self.app.GroupResult(uuid(), [res]) + res.state = states.FAILURE + res.backend.is_cached.return_value = True + self.assertIs(next(ts._failed_join_report()), res) + 
res.backend.is_cached.return_value = False + with self.assertRaises(StopIteration): + next(ts._failed_join_report()) + + def test_repr(self): + self.assertTrue(repr( + self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) + )) + + def test_children_is_results(self): + ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())]) + self.assertIs(ts.children, ts.results) + + def test_iter_native(self): + backend = SimpleBackend() + subtasks = [self.app.AsyncResult(uuid(), backend=backend) + for i in range(10)] + ts = self.app.GroupResult(uuid(), subtasks) + ts.app.backend = backend + backend.ids = [subtask.id for subtask in subtasks] + self.assertEqual(len(list(ts.iter_native())), 10) + + def test_iterate_yields(self): + ar = MockAsyncResultSuccess(uuid(), app=self.app) + ar2 = MockAsyncResultSuccess(uuid(), app=self.app) + ts = self.app.GroupResult(uuid(), [ar, ar2]) + with self.assertPendingDeprecation(): + it = ts.iterate() + self.assertEqual(next(it), 42) + self.assertEqual(next(it), 42) + + def test_iterate_eager(self): + ar1 = EagerResult(uuid(), 42, states.SUCCESS) + ar2 = EagerResult(uuid(), 42, states.SUCCESS) + ts = self.app.GroupResult(uuid(), [ar1, ar2]) + with self.assertPendingDeprecation(): + it = ts.iterate() + self.assertEqual(next(it), 42) + self.assertEqual(next(it), 42) + + def test_join_timeout(self): + ar = MockAsyncResultSuccess(uuid(), app=self.app) + ar2 = MockAsyncResultSuccess(uuid(), app=self.app) + ar3 = self.app.AsyncResult(uuid()) + ts = self.app.GroupResult(uuid(), [ar, ar2, ar3]) + with self.assertRaises(TimeoutError): + ts.join(timeout=0.0000001) + + ar4 = self.app.AsyncResult(uuid()) + ar4.get = Mock() + ts2 = self.app.GroupResult(uuid(), [ar4]) + self.assertTrue(ts2.join(timeout=0.1)) + + def test_iter_native_when_empty_group(self): + ts = self.app.GroupResult(uuid(), []) + self.assertListEqual(list(ts.iter_native()), []) + + def test_iterate_simple(self): + with self.assertPendingDeprecation(): + it = self.ts.iterate() + results = sorted(list(it)) + self.assertListEqual(results, list(range(self.size))) + + def test___iter__(self): + self.assertListEqual(list(iter(self.ts)), self.ts.results) + + def test_join(self): + joined = self.ts.join() + self.assertListEqual(joined, list(range(self.size))) + + def test_successful(self): + self.assertTrue(self.ts.successful()) + + def test_failed(self): + self.assertFalse(self.ts.failed()) + + def test_waiting(self): + self.assertFalse(self.ts.waiting()) + + def test_ready(self): + self.assertTrue(self.ts.ready()) + + def test_completed_count(self): + self.assertEqual(self.ts.completed_count(), len(self.ts)) + + +class test_pending_AsyncResult(AppCase): + + def setup(self): + self.task = self.app.AsyncResult(uuid()) + + def test_result(self): + self.assertIsNone(self.task.result) + + +class test_failed_AsyncResult(test_GroupResult): + + def setup(self): + self.size = 11 + subtasks = make_mock_group(self.app, 10) + failed = mock_task('ts11', states.FAILURE, KeyError('Baz')) + save_result(self.app, failed) + failed_res = self.app.AsyncResult(failed['id']) + self.ts = self.app.GroupResult(uuid(), subtasks + [failed_res]) + + def test_completed_count(self): + self.assertEqual(self.ts.completed_count(), len(self.ts) - 1) + + def test_iterate_simple(self): + with self.assertPendingDeprecation(): + it = self.ts.iterate() + + def consume(): + return list(it) + + with self.assertRaises(KeyError): + consume() + + def test_join(self): + with self.assertRaises(KeyError): + self.ts.join() + + def test_successful(self): 
+ self.assertFalse(self.ts.successful()) + + def test_failed(self): + self.assertTrue(self.ts.failed()) + + +class test_pending_Group(AppCase): + + def setup(self): + self.ts = self.app.GroupResult( + uuid(), [self.app.AsyncResult(uuid()), + self.app.AsyncResult(uuid())]) + + def test_completed_count(self): + self.assertEqual(self.ts.completed_count(), 0) + + def test_ready(self): + self.assertFalse(self.ts.ready()) + + def test_waiting(self): + self.assertTrue(self.ts.waiting()) + + def x_join(self): + with self.assertRaises(TimeoutError): + self.ts.join(timeout=0.001) + + def x_join_longer(self): + with self.assertRaises(TimeoutError): + self.ts.join(timeout=1) + + +class test_EagerResult(AppCase): + + def setup(self): + + @self.app.task(shared=False) + def raising(x, y): + raise KeyError(x, y) + self.raising = raising + + def test_wait_raises(self): + res = self.raising.apply(args=[3, 3]) + with self.assertRaises(KeyError): + res.wait() + self.assertTrue(res.wait(propagate=False)) + + def test_wait(self): + res = EagerResult('x', 'x', states.RETRY) + res.wait() + self.assertEqual(res.state, states.RETRY) + self.assertEqual(res.status, states.RETRY) + + def test_forget(self): + res = EagerResult('x', 'x', states.RETRY) + res.forget() + + def test_revoke(self): + res = self.raising.apply(args=[3, 3]) + self.assertFalse(res.revoke()) + + +class test_tuples(AppCase): + + def test_AsyncResult(self): + x = self.app.AsyncResult(uuid()) + self.assertEqual(x, result_from_tuple(x.as_tuple(), self.app)) + self.assertEqual(x, result_from_tuple(x, self.app)) + + def test_with_parent(self): + x = self.app.AsyncResult(uuid()) + x.parent = self.app.AsyncResult(uuid()) + y = result_from_tuple(x.as_tuple(), self.app) + self.assertEqual(y, x) + self.assertEqual(y.parent, x.parent) + self.assertIsInstance(y.parent, AsyncResult) + + def test_compat(self): + uid = uuid() + x = result_from_tuple([uid, []], app=self.app) + self.assertEqual(x.id, uid) + + def test_GroupResult(self): + x = self.app.GroupResult( + uuid(), [self.app.AsyncResult(uuid()) for _ in range(10)], + ) + self.assertEqual(x, result_from_tuple(x.as_tuple(), self.app)) + self.assertEqual(x, result_from_tuple(x, self.app)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_states.py b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_states.py new file mode 100644 index 0000000..b30a4ee --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_states.py @@ -0,0 +1,31 @@ +from __future__ import absolute_import + +from celery.states import state +from celery import states +from celery.tests.case import Case + + +class test_state_precedence(Case): + + def test_gt(self): + self.assertGreater(state(states.SUCCESS), + state(states.PENDING)) + self.assertGreater(state(states.FAILURE), + state(states.RECEIVED)) + self.assertGreater(state(states.REVOKED), + state(states.STARTED)) + self.assertGreater(state(states.SUCCESS), + state('CRASHED')) + self.assertGreater(state(states.FAILURE), + state('CRASHED')) + self.assertFalse(state(states.REVOKED) > state('CRASHED')) + + def test_lt(self): + self.assertLess(state(states.PENDING), state(states.SUCCESS)) + self.assertLess(state(states.RECEIVED), state(states.FAILURE)) + self.assertLess(state(states.STARTED), state(states.REVOKED)) + self.assertLess(state('CRASHED'), state(states.SUCCESS)) + self.assertLess(state('CRASHED'), state(states.FAILURE)) + self.assertTrue(state(states.REVOKED) < state('CRASHED')) + self.assertTrue(state(states.REVOKED) <= 
state('CRASHED')) + self.assertTrue(state('CRASHED') >= state(states.REVOKED)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_tasks.py b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_tasks.py new file mode 100644 index 0000000..4feae0b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/tasks/test_tasks.py @@ -0,0 +1,464 @@ +from __future__ import absolute_import + +from datetime import datetime, timedelta + +from kombu import Queue + +from celery import Task + +from celery.exceptions import Retry +from celery.five import items, range, string_t +from celery.result import EagerResult +from celery.utils import uuid +from celery.utils.timeutils import parse_iso8601 + +from celery.tests.case import AppCase, depends_on_current_app, patch + + +def return_True(*args, **kwargs): + # Task run functions can't be closures/lambdas, as they're pickled. + return True + + +def raise_exception(self, **kwargs): + raise Exception('%s error' % self.__class__) + + +class MockApplyTask(Task): + abstract = True + applied = 0 + + def run(self, x, y): + return x * y + + def apply_async(self, *args, **kwargs): + self.applied += 1 + + +class TasksCase(AppCase): + + def setup(self): + self.mytask = self.app.task(shared=False)(return_True) + + @self.app.task(bind=True, count=0, shared=False) + def increment_counter(self, increment_by=1): + self.count += increment_by or 1 + return self.count + self.increment_counter = increment_counter + + @self.app.task(shared=False) + def raising(): + raise KeyError('foo') + self.raising = raising + + @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) + def retry_task(self, arg1, arg2, kwarg=1, max_retries=None, care=True): + self.iterations += 1 + rmax = self.max_retries if max_retries is None else max_retries + + assert repr(self.request) + retries = self.request.retries + if care and retries >= rmax: + return arg1 + else: + raise self.retry(countdown=0, max_retries=rmax) + self.retry_task = retry_task + + @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) + def retry_task_noargs(self, **kwargs): + self.iterations += 1 + + if self.request.retries >= 3: + return 42 + else: + raise self.retry(countdown=0) + self.retry_task_noargs = retry_task_noargs + + @self.app.task(bind=True, max_retries=3, iterations=0, + base=MockApplyTask, shared=False) + def retry_task_mockapply(self, arg1, arg2, kwarg=1): + self.iterations += 1 + + retries = self.request.retries + if retries >= 3: + return arg1 + raise self.retry(countdown=0) + self.retry_task_mockapply = retry_task_mockapply + + @self.app.task(bind=True, max_retries=3, iterations=0, shared=False) + def retry_task_customexc(self, arg1, arg2, kwarg=1, **kwargs): + self.iterations += 1 + + retries = self.request.retries + if retries >= 3: + return arg1 + kwarg + else: + try: + raise MyCustomException('Elaine Marie Benes') + except MyCustomException as exc: + kwargs.update(kwarg=kwarg) + raise self.retry(countdown=0, exc=exc) + self.retry_task_customexc = retry_task_customexc + + +class MyCustomException(Exception): + """Random custom exception.""" + + +class test_task_retries(TasksCase): + + def test_retry(self): + self.retry_task.max_retries = 3 + self.retry_task.iterations = 0 + self.retry_task.apply([0xFF, 0xFFFF]) + self.assertEqual(self.retry_task.iterations, 4) + + self.retry_task.max_retries = 3 + self.retry_task.iterations = 0 + self.retry_task.apply([0xFF, 0xFFFF], {'max_retries': 10}) + self.assertEqual(self.retry_task.iterations, 11) + 
+ def test_retry_no_args(self): + self.retry_task_noargs.max_retries = 3 + self.retry_task_noargs.iterations = 0 + self.retry_task_noargs.apply(propagate=True).get() + self.assertEqual(self.retry_task_noargs.iterations, 4) + + def test_retry_kwargs_can_be_empty(self): + self.retry_task_mockapply.push_request() + try: + with self.assertRaises(Retry): + import sys + try: + sys.exc_clear() + except AttributeError: + pass + self.retry_task_mockapply.retry(args=[4, 4], kwargs=None) + finally: + self.retry_task_mockapply.pop_request() + + def test_retry_not_eager(self): + self.retry_task_mockapply.push_request() + try: + self.retry_task_mockapply.request.called_directly = False + exc = Exception('baz') + try: + self.retry_task_mockapply.retry( + args=[4, 4], kwargs={'task_retries': 0}, + exc=exc, throw=False, + ) + self.assertTrue(self.retry_task_mockapply.applied) + finally: + self.retry_task_mockapply.applied = 0 + + try: + with self.assertRaises(Retry): + self.retry_task_mockapply.retry( + args=[4, 4], kwargs={'task_retries': 0}, + exc=exc, throw=True) + self.assertTrue(self.retry_task_mockapply.applied) + finally: + self.retry_task_mockapply.applied = 0 + finally: + self.retry_task_mockapply.pop_request() + + def test_retry_with_kwargs(self): + self.retry_task_customexc.max_retries = 3 + self.retry_task_customexc.iterations = 0 + self.retry_task_customexc.apply([0xFF, 0xFFFF], {'kwarg': 0xF}) + self.assertEqual(self.retry_task_customexc.iterations, 4) + + def test_retry_with_custom_exception(self): + self.retry_task_customexc.max_retries = 2 + self.retry_task_customexc.iterations = 0 + result = self.retry_task_customexc.apply( + [0xFF, 0xFFFF], {'kwarg': 0xF}, + ) + with self.assertRaises(MyCustomException): + result.get() + self.assertEqual(self.retry_task_customexc.iterations, 3) + + def test_max_retries_exceeded(self): + self.retry_task.max_retries = 2 + self.retry_task.iterations = 0 + result = self.retry_task.apply([0xFF, 0xFFFF], {'care': False}) + with self.assertRaises(self.retry_task.MaxRetriesExceededError): + result.get() + self.assertEqual(self.retry_task.iterations, 3) + + self.retry_task.max_retries = 1 + self.retry_task.iterations = 0 + result = self.retry_task.apply([0xFF, 0xFFFF], {'care': False}) + with self.assertRaises(self.retry_task.MaxRetriesExceededError): + result.get() + self.assertEqual(self.retry_task.iterations, 2) + + +class test_canvas_utils(TasksCase): + + def test_si(self): + self.assertTrue(self.retry_task.si()) + self.assertTrue(self.retry_task.si().immutable) + + def test_chunks(self): + self.assertTrue(self.retry_task.chunks(range(100), 10)) + + def test_map(self): + self.assertTrue(self.retry_task.map(range(100))) + + def test_starmap(self): + self.assertTrue(self.retry_task.starmap(range(100))) + + def test_on_success(self): + self.retry_task.on_success(1, 1, (), {}) + + +class test_tasks(TasksCase): + + def now(self): + return self.app.now() + + @depends_on_current_app + def test_unpickle_task(self): + import pickle + + @self.app.task(shared=True) + def xxx(): + pass + self.assertIs(pickle.loads(pickle.dumps(xxx)), xxx.app.tasks[xxx.name]) + + def test_AsyncResult(self): + task_id = uuid() + result = self.retry_task.AsyncResult(task_id) + self.assertEqual(result.backend, self.retry_task.backend) + self.assertEqual(result.id, task_id) + + def assertNextTaskDataEqual(self, consumer, presult, task_name, + test_eta=False, test_expires=False, **kwargs): + next_task = consumer.queues[0].get(accept=['pickle']) + task_data = next_task.decode() + 
self.assertEqual(task_data['id'], presult.id) + self.assertEqual(task_data['task'], task_name) + task_kwargs = task_data.get('kwargs', {}) + if test_eta: + self.assertIsInstance(task_data.get('eta'), string_t) + to_datetime = parse_iso8601(task_data.get('eta')) + self.assertIsInstance(to_datetime, datetime) + if test_expires: + self.assertIsInstance(task_data.get('expires'), string_t) + to_datetime = parse_iso8601(task_data.get('expires')) + self.assertIsInstance(to_datetime, datetime) + for arg_name, arg_value in items(kwargs): + self.assertEqual(task_kwargs.get(arg_name), arg_value) + + def test_incomplete_task_cls(self): + + class IncompleteTask(Task): + app = self.app + name = 'c.unittest.t.itask' + + with self.assertRaises(NotImplementedError): + IncompleteTask().run() + + def test_task_kwargs_must_be_dictionary(self): + with self.assertRaises(ValueError): + self.increment_counter.apply_async([], 'str') + + def test_task_args_must_be_list(self): + with self.assertRaises(ValueError): + self.increment_counter.apply_async('str', {}) + + def test_regular_task(self): + self.assertIsInstance(self.mytask, Task) + self.assertTrue(self.mytask.run()) + self.assertTrue( + callable(self.mytask), 'Task class is callable()', + ) + self.assertTrue(self.mytask(), 'Task class runs run() when called') + + with self.app.connection_or_acquire() as conn: + consumer = self.app.amqp.TaskConsumer(conn) + with self.assertRaises(NotImplementedError): + consumer.receive('foo', 'foo') + consumer.purge() + self.assertIsNone(consumer.queues[0].get()) + self.app.amqp.TaskConsumer(conn, queues=[Queue('foo')]) + + # Without arguments. + presult = self.mytask.delay() + self.assertNextTaskDataEqual(consumer, presult, self.mytask.name) + + # With arguments. + presult2 = self.mytask.apply_async( + kwargs=dict(name='George Costanza'), + ) + self.assertNextTaskDataEqual( + consumer, presult2, self.mytask.name, name='George Costanza', + ) + + # send_task + sresult = self.app.send_task(self.mytask.name, + kwargs=dict(name='Elaine M. Benes')) + self.assertNextTaskDataEqual( + consumer, sresult, self.mytask.name, name='Elaine M. Benes', + ) + + # With eta. + presult2 = self.mytask.apply_async( + kwargs=dict(name='George Costanza'), + eta=self.now() + timedelta(days=1), + expires=self.now() + timedelta(days=2), + ) + self.assertNextTaskDataEqual( + consumer, presult2, self.mytask.name, + name='George Costanza', test_eta=True, test_expires=True, + ) + + # With countdown. + presult2 = self.mytask.apply_async( + kwargs=dict(name='George Costanza'), countdown=10, expires=12, + ) + self.assertNextTaskDataEqual( + consumer, presult2, self.mytask.name, + name='George Costanza', test_eta=True, test_expires=True, + ) + + # Discarding all tasks. 
+ consumer.purge() + self.mytask.apply_async() + self.assertEqual(consumer.purge(), 1) + self.assertIsNone(consumer.queues[0].get()) + + self.assertFalse(presult.successful()) + self.mytask.backend.mark_as_done(presult.id, result=None) + self.assertTrue(presult.successful()) + + def test_repr_v2_compat(self): + self.mytask.__v2_compat__ = True + self.assertIn('v2 compatible', repr(self.mytask)) + + def test_apply_with_self(self): + + @self.app.task(__self__=42, shared=False) + def tawself(self): + return self + + self.assertEqual(tawself.apply().get(), 42) + + self.assertEqual(tawself(), 42) + + def test_context_get(self): + self.mytask.push_request() + try: + request = self.mytask.request + request.foo = 32 + self.assertEqual(request.get('foo'), 32) + self.assertEqual(request.get('bar', 36), 36) + request.clear() + finally: + self.mytask.pop_request() + + def test_task_class_repr(self): + self.assertIn('class Task of', repr(self.mytask.app.Task)) + self.mytask.app.Task._app = None + self.assertIn('unbound', repr(self.mytask.app.Task, )) + + def test_bind_no_magic_kwargs(self): + self.mytask.accept_magic_kwargs = None + self.mytask.bind(self.mytask.app) + + def test_annotate(self): + with patch('celery.app.task.resolve_all_annotations') as anno: + anno.return_value = [{'FOO': 'BAR'}] + + @self.app.task(shared=False) + def task(): + pass + task.annotate() + self.assertEqual(task.FOO, 'BAR') + + def test_after_return(self): + self.mytask.push_request() + try: + self.mytask.request.chord = self.mytask.s() + self.mytask.after_return('SUCCESS', 1.0, 'foobar', (), {}, None) + self.mytask.request.clear() + finally: + self.mytask.pop_request() + + def test_send_task_sent_event(self): + with self.app.connection() as conn: + self.app.conf.CELERY_SEND_TASK_SENT_EVENT = True + self.assertTrue(self.app.amqp.TaskProducer(conn).send_sent_event) + + def test_update_state(self): + + @self.app.task(shared=False) + def yyy(): + pass + + yyy.push_request() + try: + tid = uuid() + yyy.update_state(tid, 'FROBULATING', {'fooz': 'baaz'}) + self.assertEqual(yyy.AsyncResult(tid).status, 'FROBULATING') + self.assertDictEqual(yyy.AsyncResult(tid).result, {'fooz': 'baaz'}) + + yyy.request.id = tid + yyy.update_state(state='FROBUZATING', meta={'fooz': 'baaz'}) + self.assertEqual(yyy.AsyncResult(tid).status, 'FROBUZATING') + self.assertDictEqual(yyy.AsyncResult(tid).result, {'fooz': 'baaz'}) + finally: + yyy.pop_request() + + def test_repr(self): + + @self.app.task(shared=False) + def task_test_repr(): + pass + + self.assertIn('task_test_repr', repr(task_test_repr)) + + def test_has___name__(self): + + @self.app.task(shared=False) + def yyy2(): + pass + + self.assertTrue(yyy2.__name__) + + +class test_apply_task(TasksCase): + + def test_apply_throw(self): + with self.assertRaises(KeyError): + self.raising.apply(throw=True) + + def test_apply_with_CELERY_EAGER_PROPAGATES_EXCEPTIONS(self): + self.app.conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True + with self.assertRaises(KeyError): + self.raising.apply() + + def test_apply(self): + self.increment_counter.count = 0 + + e = self.increment_counter.apply() + self.assertIsInstance(e, EagerResult) + self.assertEqual(e.get(), 1) + + e = self.increment_counter.apply(args=[1]) + self.assertEqual(e.get(), 2) + + e = self.increment_counter.apply(kwargs={'increment_by': 4}) + self.assertEqual(e.get(), 6) + + self.assertTrue(e.successful()) + self.assertTrue(e.ready()) + self.assertTrue(repr(e).startswith('<EagerResult:')) + self.assertEqual(Proxy(lambda: 10) >> 2, Proxy(lambda: 2)) + self.assertEqual(Proxy(lambda: 10) ^ 7, Proxy(lambda: 
13)) + self.assertEqual(Proxy(lambda: 10) | 40, Proxy(lambda: 42)) + self.assertEqual(~Proxy(lambda: 10), Proxy(lambda: -11)) + self.assertEqual(-Proxy(lambda: 10), Proxy(lambda: -10)) + self.assertEqual(+Proxy(lambda: -10), Proxy(lambda: -10)) + self.assertTrue(Proxy(lambda: 10) < Proxy(lambda: 20)) + self.assertTrue(Proxy(lambda: 20) > Proxy(lambda: 10)) + self.assertTrue(Proxy(lambda: 10) >= Proxy(lambda: 10)) + self.assertTrue(Proxy(lambda: 10) <= Proxy(lambda: 10)) + self.assertTrue(Proxy(lambda: 10) == Proxy(lambda: 10)) + self.assertTrue(Proxy(lambda: 20) != Proxy(lambda: 10)) + self.assertTrue(Proxy(lambda: 100).__divmod__(30)) + self.assertTrue(Proxy(lambda: 100).__truediv__(30)) + self.assertTrue(abs(Proxy(lambda: -100))) + + x = Proxy(lambda: 10) + x -= 1 + self.assertEqual(x, 9) + x = Proxy(lambda: 9) + x += 1 + self.assertEqual(x, 10) + x = Proxy(lambda: 10) + x *= 2 + self.assertEqual(x, 20) + x = Proxy(lambda: 20) + x /= 2 + self.assertEqual(x, 10) + x = Proxy(lambda: 10) + x %= 2 + self.assertEqual(x, 0) + x = Proxy(lambda: 10) + x <<= 3 + self.assertEqual(x, 80) + x = Proxy(lambda: 80) + x >>= 4 + self.assertEqual(x, 5) + x = Proxy(lambda: 5) + x ^= 1 + self.assertEqual(x, 4) + x = Proxy(lambda: 4) + x **= 4 + self.assertEqual(x, 256) + x = Proxy(lambda: 256) + x //= 2 + self.assertEqual(x, 128) + x = Proxy(lambda: 128) + x |= 2 + self.assertEqual(x, 130) + x = Proxy(lambda: 130) + x &= 10 + self.assertEqual(x, 2) + + x = Proxy(lambda: 10) + self.assertEqual(type(x.__float__()), float) + self.assertEqual(type(x.__int__()), int) + if not PY3: + self.assertEqual(type(x.__long__()), long_t) + self.assertTrue(hex(x)) + self.assertTrue(oct(x)) + + def test_hash(self): + + class X(object): + + def __hash__(self): + return 1234 + + self.assertEqual(hash(Proxy(lambda: X())), 1234) + + def test_call(self): + + class X(object): + + def __call__(self): + return 1234 + + self.assertEqual(Proxy(lambda: X())(), 1234) + + def test_context(self): + + class X(object): + entered = exited = False + + def __enter__(self): + self.entered = True + return 1234 + + def __exit__(self, *exc_info): + self.exited = True + + v = X() + x = Proxy(lambda: v) + with x as val: + self.assertEqual(val, 1234) + self.assertTrue(x.entered) + self.assertTrue(x.exited) + + def test_reduce(self): + + class X(object): + + def __reduce__(self): + return 123 + + x = Proxy(lambda: X()) + self.assertEqual(x.__reduce__(), 123) + + +class test_PromiseProxy(Case): + + def test_only_evaluated_once(self): + + class X(object): + attr = 123 + evals = 0 + + def __init__(self): + self.__class__.evals += 1 + + p = PromiseProxy(X) + self.assertEqual(p.attr, 123) + self.assertEqual(p.attr, 123) + self.assertEqual(X.evals, 1) + + def test_callbacks(self): + source = Mock(name='source') + p = PromiseProxy(source) + cbA = Mock(name='cbA') + cbB = Mock(name='cbB') + cbC = Mock(name='cbC') + p.__then__(cbA, p) + p.__then__(cbB, p) + self.assertFalse(p.__evaluated__()) + self.assertTrue(object.__getattribute__(p, '__pending__')) + + self.assertTrue(repr(p)) + self.assertTrue(p.__evaluated__()) + with self.assertRaises(AttributeError): + object.__getattribute__(p, '__pending__') + cbA.assert_called_with(p) + cbB.assert_called_with(p) + + self.assertTrue(p.__evaluated__()) + p.__then__(cbC, p) + cbC.assert_called_with(p) + + with self.assertRaises(AttributeError): + object.__getattribute__(p, '__pending__') + + def test_maybe_evaluate(self): + x = PromiseProxy(lambda: 30) + self.assertFalse(x.__evaluated__()) + 
self.assertEqual(maybe_evaluate(x), 30) + self.assertEqual(maybe_evaluate(x), 30) + + self.assertEqual(maybe_evaluate(30), 30) + self.assertTrue(x.__evaluated__()) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_mail.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_mail.py new file mode 100644 index 0000000..e4fc965 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_mail.py @@ -0,0 +1,53 @@ +from __future__ import absolute_import + +from celery.utils.mail import Message, Mailer, SSLError + +from celery.tests.case import Case, Mock, patch + + +msg = Message(to='george@vandelay.com', sender='elaine@pendant.com', + subject="What's up with Jerry?", body='???!') + + +class test_Message(Case): + + def test_repr(self): + self.assertTrue(repr(msg)) + + def test_str(self): + self.assertTrue(str(msg)) + + +class test_Mailer(Case): + + def test_send_wrapper(self): + mailer = Mailer() + mailer._send = Mock() + mailer.send(msg) + mailer._send.assert_called_with(msg) + + @patch('smtplib.SMTP_SSL', create=True) + def test_send_ssl_tls(self, SMTP_SSL): + mailer = Mailer(use_ssl=True, use_tls=True) + client = SMTP_SSL.return_value = Mock() + mailer._send(msg) + self.assertTrue(client.starttls.called) + self.assertEqual(client.ehlo.call_count, 2) + client.quit.assert_called_with() + client.sendmail.assert_called_with(msg.sender, msg.to, str(msg)) + mailer = Mailer(use_ssl=True, use_tls=True, user='foo', + password='bar') + mailer._send(msg) + client.login.assert_called_with('foo', 'bar') + + @patch('smtplib.SMTP') + def test_send(self, SMTP): + client = SMTP.return_value = Mock() + mailer = Mailer(use_ssl=False, use_tls=False) + mailer._send(msg) + + client.sendmail.assert_called_with(msg.sender, msg.to, str(msg)) + + client.quit.side_effect = SSLError() + mailer._send(msg) + client.close.assert_called_with() diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_pickle.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_pickle.py new file mode 100644 index 0000000..6b65bb3 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_pickle.py @@ -0,0 +1,51 @@ +from __future__ import absolute_import + +from celery.utils.serialization import pickle +from celery.tests.case import Case + + +class RegularException(Exception): + pass + + +class ArgOverrideException(Exception): + + def __init__(self, message, status_code=10): + self.status_code = status_code + Exception.__init__(self, message, status_code) + + +class test_Pickle(Case): + + def test_pickle_regular_exception(self): + exc = None + try: + raise RegularException('RegularException raised') + except RegularException as exc_: + exc = exc_ + + pickled = pickle.dumps({'exception': exc}) + unpickled = pickle.loads(pickled) + exception = unpickled.get('exception') + self.assertTrue(exception) + self.assertIsInstance(exception, RegularException) + self.assertTupleEqual(exception.args, ('RegularException raised', )) + + def test_pickle_arg_override_exception(self): + + exc = None + try: + raise ArgOverrideException( + 'ArgOverrideException raised', status_code=100, + ) + except ArgOverrideException as exc_: + exc = exc_ + + pickled = pickle.dumps({'exception': exc}) + unpickled = pickle.loads(pickled) + exception = unpickled.get('exception') + self.assertTrue(exception) + self.assertIsInstance(exception, ArgOverrideException) + self.assertTupleEqual(exception.args, ( + 'ArgOverrideException raised', 100)) + 
self.assertEqual(exception.status_code, 100) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_platforms.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_platforms.py new file mode 100644 index 0000000..4f2c584 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_platforms.py @@ -0,0 +1,713 @@ +from __future__ import absolute_import + +import errno +import os +import sys +import signal +import tempfile + +from celery import _find_option_with_arg +from celery import platforms +from celery.five import open_fqdn +from celery.platforms import ( + get_fdmax, + ignore_errno, + set_process_title, + signals, + maybe_drop_privileges, + setuid, + setgid, + initgroups, + parse_uid, + parse_gid, + detached, + DaemonContext, + create_pidlock, + Pidfile, + LockFailed, + setgroups, + _setgroups_hack, + close_open_fds, + fd_by_path, +) + +try: + import resource +except ImportError: # pragma: no cover + resource = None # noqa + +from celery.tests.case import ( + Case, WhateverIO, Mock, SkipTest, + call, override_stdouts, mock_open, patch, +) + + +class test_find_option_with_arg(Case): + + def test_long_opt(self): + self.assertEqual( + _find_option_with_arg(['--foo=bar'], long_opts=['--foo']), + 'bar' + ) + + def test_short_opt(self): + self.assertEqual( + _find_option_with_arg(['-f', 'bar'], short_opts=['-f']), + 'bar' + ) + + +class test_fd_by_path(Case): + + def test_finds(self): + test_file = tempfile.NamedTemporaryFile() + keep = fd_by_path([test_file.name]) + self.assertEqual(keep, [test_file.file.fileno()]) + test_file.close() + + +class test_close_open_fds(Case): + + def test_closes(self): + with patch('os.close') as _close: + with patch('os.closerange', create=True) as closerange: + with patch('celery.platforms.get_fdmax') as fdmax: + fdmax.return_value = 3 + close_open_fds() + if not closerange.called: + _close.assert_has_calls([call(2), call(1), call(0)]) + _close.side_effect = OSError() + _close.side_effect.errno = errno.EBADF + close_open_fds() + + +class test_ignore_errno(Case): + + def test_raises_EBADF(self): + with ignore_errno('EBADF'): + exc = OSError() + exc.errno = errno.EBADF + raise exc + + def test_otherwise(self): + with self.assertRaises(OSError): + with ignore_errno('EBADF'): + exc = OSError() + exc.errno = errno.ENOENT + raise exc + + +class test_set_process_title(Case): + + def when_no_setps(self): + prev = platforms._setproctitle = platforms._setproctitle, None + try: + set_process_title('foo') + finally: + platforms._setproctitle = prev + + +class test_Signals(Case): + + @patch('signal.getsignal') + def test_getitem(self, getsignal): + signals['SIGINT'] + getsignal.assert_called_with(signal.SIGINT) + + def test_supported(self): + self.assertTrue(signals.supported('INT')) + self.assertFalse(signals.supported('SIGIMAGINARY')) + + def test_reset_alarm(self): + if sys.platform == 'win32': + raise SkipTest('signal.alarm not available on Windows') + with patch('signal.alarm') as _alarm: + signals.reset_alarm() + _alarm.assert_called_with(0) + + def test_arm_alarm(self): + if hasattr(signal, 'setitimer'): + with patch('signal.setitimer', create=True) as seti: + signals.arm_alarm(30) + self.assertTrue(seti.called) + + def test_signum(self): + self.assertEqual(signals.signum(13), 13) + self.assertEqual(signals.signum('INT'), signal.SIGINT) + self.assertEqual(signals.signum('SIGINT'), signal.SIGINT) + with self.assertRaises(TypeError): + signals.signum('int') + signals.signum(object()) + + @patch('signal.signal') + 
def test_ignore(self, set): + signals.ignore('SIGINT') + set.assert_called_with(signals.signum('INT'), signals.ignored) + signals.ignore('SIGTERM') + set.assert_called_with(signals.signum('TERM'), signals.ignored) + + @patch('signal.signal') + def test_setitem(self, set): + def handle(*a): + return a + signals['INT'] = handle + set.assert_called_with(signal.SIGINT, handle) + + @patch('signal.signal') + def test_setitem_raises(self, set): + set.side_effect = ValueError() + signals['INT'] = lambda *a: a + + +if not platforms.IS_WINDOWS: + + class test_get_fdmax(Case): + + @patch('resource.getrlimit') + def test_when_infinity(self, getrlimit): + with patch('os.sysconf') as sysconfig: + sysconfig.side_effect = KeyError() + getrlimit.return_value = [None, resource.RLIM_INFINITY] + default = object() + self.assertIs(get_fdmax(default), default) + + @patch('resource.getrlimit') + def test_when_actual(self, getrlimit): + with patch('os.sysconf') as sysconfig: + sysconfig.side_effect = KeyError() + getrlimit.return_value = [None, 13] + self.assertEqual(get_fdmax(None), 13) + + class test_maybe_drop_privileges(Case): + + @patch('celery.platforms.parse_uid') + @patch('pwd.getpwuid') + @patch('celery.platforms.setgid') + @patch('celery.platforms.setuid') + @patch('celery.platforms.initgroups') + def test_with_uid(self, initgroups, setuid, setgid, + getpwuid, parse_uid): + + class pw_struct(object): + pw_gid = 50001 + + def raise_on_second_call(*args, **kwargs): + setuid.side_effect = OSError() + setuid.side_effect.errno = errno.EPERM + setuid.side_effect = raise_on_second_call + getpwuid.return_value = pw_struct() + parse_uid.return_value = 5001 + maybe_drop_privileges(uid='user') + parse_uid.assert_called_with('user') + getpwuid.assert_called_with(5001) + setgid.assert_called_with(50001) + initgroups.assert_called_with(5001, 50001) + setuid.assert_has_calls([call(5001), call(0)]) + + @patch('celery.platforms.parse_uid') + @patch('celery.platforms.parse_gid') + @patch('celery.platforms.setgid') + @patch('celery.platforms.setuid') + @patch('celery.platforms.initgroups') + def test_with_guid(self, initgroups, setuid, setgid, + parse_gid, parse_uid): + + def raise_on_second_call(*args, **kwargs): + setuid.side_effect = OSError() + setuid.side_effect.errno = errno.EPERM + setuid.side_effect = raise_on_second_call + parse_uid.return_value = 5001 + parse_gid.return_value = 50001 + maybe_drop_privileges(uid='user', gid='group') + parse_uid.assert_called_with('user') + parse_gid.assert_called_with('group') + setgid.assert_called_with(50001) + initgroups.assert_called_with(5001, 50001) + setuid.assert_has_calls([call(5001), call(0)]) + + setuid.side_effect = None + with self.assertRaises(RuntimeError): + maybe_drop_privileges(uid='user', gid='group') + setuid.side_effect = OSError() + setuid.side_effect.errno = errno.EINVAL + with self.assertRaises(OSError): + maybe_drop_privileges(uid='user', gid='group') + + @patch('celery.platforms.setuid') + @patch('celery.platforms.setgid') + @patch('celery.platforms.parse_gid') + def test_only_gid(self, parse_gid, setgid, setuid): + parse_gid.return_value = 50001 + maybe_drop_privileges(gid='group') + parse_gid.assert_called_with('group') + setgid.assert_called_with(50001) + self.assertFalse(setuid.called) + + class test_setget_uid_gid(Case): + + @patch('celery.platforms.parse_uid') + @patch('os.setuid') + def test_setuid(self, _setuid, parse_uid): + parse_uid.return_value = 5001 + setuid('user') + parse_uid.assert_called_with('user') + _setuid.assert_called_with(5001) + 
+ @patch('celery.platforms.parse_gid') + @patch('os.setgid') + def test_setgid(self, _setgid, parse_gid): + parse_gid.return_value = 50001 + setgid('group') + parse_gid.assert_called_with('group') + _setgid.assert_called_with(50001) + + def test_parse_uid_when_int(self): + self.assertEqual(parse_uid(5001), 5001) + + @patch('pwd.getpwnam') + def test_parse_uid_when_existing_name(self, getpwnam): + + class pwent(object): + pw_uid = 5001 + + getpwnam.return_value = pwent() + self.assertEqual(parse_uid('user'), 5001) + + @patch('pwd.getpwnam') + def test_parse_uid_when_nonexisting_name(self, getpwnam): + getpwnam.side_effect = KeyError('user') + + with self.assertRaises(KeyError): + parse_uid('user') + + def test_parse_gid_when_int(self): + self.assertEqual(parse_gid(50001), 50001) + + @patch('grp.getgrnam') + def test_parse_gid_when_existing_name(self, getgrnam): + + class grent(object): + gr_gid = 50001 + + getgrnam.return_value = grent() + self.assertEqual(parse_gid('group'), 50001) + + @patch('grp.getgrnam') + def test_parse_gid_when_nonexisting_name(self, getgrnam): + getgrnam.side_effect = KeyError('group') + + with self.assertRaises(KeyError): + parse_gid('group') + + class test_initgroups(Case): + + @patch('pwd.getpwuid') + @patch('os.initgroups', create=True) + def test_with_initgroups(self, initgroups_, getpwuid): + getpwuid.return_value = ['user'] + initgroups(5001, 50001) + initgroups_.assert_called_with('user', 50001) + + @patch('celery.platforms.setgroups') + @patch('grp.getgrall') + @patch('pwd.getpwuid') + def test_without_initgroups(self, getpwuid, getgrall, setgroups): + prev = getattr(os, 'initgroups', None) + try: + delattr(os, 'initgroups') + except AttributeError: + pass + try: + getpwuid.return_value = ['user'] + + class grent(object): + gr_mem = ['user'] + + def __init__(self, gid): + self.gr_gid = gid + + getgrall.return_value = [grent(1), grent(2), grent(3)] + initgroups(5001, 50001) + setgroups.assert_called_with([1, 2, 3]) + finally: + if prev: + os.initgroups = prev + + class test_detached(Case): + + def test_without_resource(self): + prev, platforms.resource = platforms.resource, None + try: + with self.assertRaises(RuntimeError): + detached() + finally: + platforms.resource = prev + + @patch('celery.platforms._create_pidlock') + @patch('celery.platforms.signals') + @patch('celery.platforms.maybe_drop_privileges') + @patch('os.geteuid') + @patch(open_fqdn) + def test_default(self, open, geteuid, maybe_drop, + signals, pidlock): + geteuid.return_value = 0 + context = detached(uid='user', gid='group') + self.assertIsInstance(context, DaemonContext) + signals.reset.assert_called_with('SIGCLD') + maybe_drop.assert_called_with(uid='user', gid='group') + open.return_value = Mock() + + geteuid.return_value = 5001 + context = detached(uid='user', gid='group', logfile='/foo/bar') + self.assertIsInstance(context, DaemonContext) + self.assertTrue(context.after_chdir) + context.after_chdir() + open.assert_called_with('/foo/bar', 'a') + open.return_value.close.assert_called_with() + + context = detached(pidfile='/foo/bar/pid') + self.assertIsInstance(context, DaemonContext) + self.assertTrue(context.after_chdir) + context.after_chdir() + pidlock.assert_called_with('/foo/bar/pid') + + class test_DaemonContext(Case): + + @patch('os.fork') + @patch('os.setsid') + @patch('os._exit') + @patch('os.chdir') + @patch('os.umask') + @patch('os.close') + @patch('os.closerange') + @patch('os.open') + @patch('os.dup2') + def test_open(self, dup2, open, close, closer, umask, chdir, + _exit, 
setsid, fork): + x = DaemonContext(workdir='/opt/workdir', umask=0o22) + x.stdfds = [0, 1, 2] + + fork.return_value = 0 + with x: + self.assertTrue(x._is_open) + with x: + pass + self.assertEqual(fork.call_count, 2) + setsid.assert_called_with() + self.assertFalse(_exit.called) + + chdir.assert_called_with(x.workdir) + umask.assert_called_with(0o22) + self.assertTrue(dup2.called) + + fork.reset_mock() + fork.return_value = 1 + x = DaemonContext(workdir='/opt/workdir') + x.stdfds = [0, 1, 2] + with x: + pass + self.assertEqual(fork.call_count, 1) + _exit.assert_called_with(0) + + x = DaemonContext(workdir='/opt/workdir', fake=True) + x.stdfds = [0, 1, 2] + x._detach = Mock() + with x: + pass + self.assertFalse(x._detach.called) + + x.after_chdir = Mock() + with x: + pass + x.after_chdir.assert_called_with() + + class test_Pidfile(Case): + + @patch('celery.platforms.Pidfile') + def test_create_pidlock(self, Pidfile): + p = Pidfile.return_value = Mock() + p.is_locked.return_value = True + p.remove_if_stale.return_value = False + with override_stdouts() as (_, err): + with self.assertRaises(SystemExit): + create_pidlock('/var/pid') + self.assertIn('already exists', err.getvalue()) + + p.remove_if_stale.return_value = True + ret = create_pidlock('/var/pid') + self.assertIs(ret, p) + + def test_context(self): + p = Pidfile('/var/pid') + p.write_pid = Mock() + p.remove = Mock() + + with p as _p: + self.assertIs(_p, p) + p.write_pid.assert_called_with() + p.remove.assert_called_with() + + def test_acquire_raises_LockFailed(self): + p = Pidfile('/var/pid') + p.write_pid = Mock() + p.write_pid.side_effect = OSError() + + with self.assertRaises(LockFailed): + with p: + pass + + @patch('os.path.exists') + def test_is_locked(self, exists): + p = Pidfile('/var/pid') + exists.return_value = True + self.assertTrue(p.is_locked()) + exists.return_value = False + self.assertFalse(p.is_locked()) + + def test_read_pid(self): + with mock_open() as s: + s.write('1816\n') + s.seek(0) + p = Pidfile('/var/pid') + self.assertEqual(p.read_pid(), 1816) + + def test_read_pid_partially_written(self): + with mock_open() as s: + s.write('1816') + s.seek(0) + p = Pidfile('/var/pid') + with self.assertRaises(ValueError): + p.read_pid() + + def test_read_pid_raises_ENOENT(self): + exc = IOError() + exc.errno = errno.ENOENT + with mock_open(side_effect=exc): + p = Pidfile('/var/pid') + self.assertIsNone(p.read_pid()) + + def test_read_pid_raises_IOError(self): + exc = IOError() + exc.errno = errno.EAGAIN + with mock_open(side_effect=exc): + p = Pidfile('/var/pid') + with self.assertRaises(IOError): + p.read_pid() + + def test_read_pid_bogus_pidfile(self): + with mock_open() as s: + s.write('eighteensixteen\n') + s.seek(0) + p = Pidfile('/var/pid') + with self.assertRaises(ValueError): + p.read_pid() + + @patch('os.unlink') + def test_remove(self, unlink): + unlink.return_value = True + p = Pidfile('/var/pid') + p.remove() + unlink.assert_called_with(p.path) + + @patch('os.unlink') + def test_remove_ENOENT(self, unlink): + exc = OSError() + exc.errno = errno.ENOENT + unlink.side_effect = exc + p = Pidfile('/var/pid') + p.remove() + unlink.assert_called_with(p.path) + + @patch('os.unlink') + def test_remove_EACCES(self, unlink): + exc = OSError() + exc.errno = errno.EACCES + unlink.side_effect = exc + p = Pidfile('/var/pid') + p.remove() + unlink.assert_called_with(p.path) + + @patch('os.unlink') + def test_remove_OSError(self, unlink): + exc = OSError() + exc.errno = errno.EAGAIN + unlink.side_effect = exc + p = 
Pidfile('/var/pid') + with self.assertRaises(OSError): + p.remove() + unlink.assert_called_with(p.path) + + @patch('os.kill') + def test_remove_if_stale_process_alive(self, kill): + p = Pidfile('/var/pid') + p.read_pid = Mock() + p.read_pid.return_value = 1816 + kill.return_value = 0 + self.assertFalse(p.remove_if_stale()) + kill.assert_called_with(1816, 0) + p.read_pid.assert_called_with() + + kill.side_effect = OSError() + kill.side_effect.errno = errno.ENOENT + self.assertFalse(p.remove_if_stale()) + + @patch('os.kill') + def test_remove_if_stale_process_dead(self, kill): + with override_stdouts(): + p = Pidfile('/var/pid') + p.read_pid = Mock() + p.read_pid.return_value = 1816 + p.remove = Mock() + exc = OSError() + exc.errno = errno.ESRCH + kill.side_effect = exc + self.assertTrue(p.remove_if_stale()) + kill.assert_called_with(1816, 0) + p.remove.assert_called_with() + + def test_remove_if_stale_broken_pid(self): + with override_stdouts(): + p = Pidfile('/var/pid') + p.read_pid = Mock() + p.read_pid.side_effect = ValueError() + p.remove = Mock() + + self.assertTrue(p.remove_if_stale()) + p.remove.assert_called_with() + + def test_remove_if_stale_no_pidfile(self): + p = Pidfile('/var/pid') + p.read_pid = Mock() + p.read_pid.return_value = None + p.remove = Mock() + + self.assertTrue(p.remove_if_stale()) + p.remove.assert_called_with() + + @patch('os.fsync') + @patch('os.getpid') + @patch('os.open') + @patch('os.fdopen') + @patch(open_fqdn) + def test_write_pid(self, open_, fdopen, osopen, getpid, fsync): + getpid.return_value = 1816 + osopen.return_value = 13 + w = fdopen.return_value = WhateverIO() + w.close = Mock() + r = open_.return_value = WhateverIO() + r.write('1816\n') + r.seek(0) + + p = Pidfile('/var/pid') + p.write_pid() + w.seek(0) + self.assertEqual(w.readline(), '1816\n') + self.assertTrue(w.close.called) + getpid.assert_called_with() + osopen.assert_called_with(p.path, platforms.PIDFILE_FLAGS, + platforms.PIDFILE_MODE) + fdopen.assert_called_with(13, 'w') + fsync.assert_called_with(13) + open_.assert_called_with(p.path) + + @patch('os.fsync') + @patch('os.getpid') + @patch('os.open') + @patch('os.fdopen') + @patch(open_fqdn) + def test_write_reread_fails(self, open_, fdopen, + osopen, getpid, fsync): + getpid.return_value = 1816 + osopen.return_value = 13 + w = fdopen.return_value = WhateverIO() + w.close = Mock() + r = open_.return_value = WhateverIO() + r.write('11816\n') + r.seek(0) + + p = Pidfile('/var/pid') + with self.assertRaises(LockFailed): + p.write_pid() + + class test_setgroups(Case): + + @patch('os.setgroups', create=True) + def test_setgroups_hack_ValueError(self, setgroups): + + def on_setgroups(groups): + if len(groups) <= 200: + setgroups.return_value = True + return + raise ValueError() + setgroups.side_effect = on_setgroups + _setgroups_hack(list(range(400))) + + setgroups.side_effect = ValueError() + with self.assertRaises(ValueError): + _setgroups_hack(list(range(400))) + + @patch('os.setgroups', create=True) + def test_setgroups_hack_OSError(self, setgroups): + exc = OSError() + exc.errno = errno.EINVAL + + def on_setgroups(groups): + if len(groups) <= 200: + setgroups.return_value = True + return + raise exc + setgroups.side_effect = on_setgroups + + _setgroups_hack(list(range(400))) + + setgroups.side_effect = exc + with self.assertRaises(OSError): + _setgroups_hack(list(range(400))) + + exc2 = OSError() + exc.errno = errno.ESRCH + setgroups.side_effect = exc2 + with self.assertRaises(OSError): + _setgroups_hack(list(range(400))) + + 
@patch('os.sysconf') + @patch('celery.platforms._setgroups_hack') + def test_setgroups(self, hack, sysconf): + sysconf.return_value = 100 + setgroups(list(range(400))) + hack.assert_called_with(list(range(100))) + + @patch('os.sysconf') + @patch('celery.platforms._setgroups_hack') + def test_setgroups_sysconf_raises(self, hack, sysconf): + sysconf.side_effect = ValueError() + setgroups(list(range(400))) + hack.assert_called_with(list(range(400))) + + @patch('os.getgroups') + @patch('os.sysconf') + @patch('celery.platforms._setgroups_hack') + def test_setgroups_raises_ESRCH(self, hack, sysconf, getgroups): + sysconf.side_effect = ValueError() + esrch = OSError() + esrch.errno = errno.ESRCH + hack.side_effect = esrch + with self.assertRaises(OSError): + setgroups(list(range(400))) + + @patch('os.getgroups') + @patch('os.sysconf') + @patch('celery.platforms._setgroups_hack') + def test_setgroups_raises_EPERM(self, hack, sysconf, getgroups): + sysconf.side_effect = ValueError() + eperm = OSError() + eperm.errno = errno.EPERM + hack.side_effect = eperm + getgroups.return_value = list(range(400)) + setgroups(list(range(400))) + getgroups.assert_called_with() + + getgroups.return_value = [1000] + with self.assertRaises(OSError): + setgroups(list(range(400))) + getgroups.assert_called_with() diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_saferef.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_saferef.py new file mode 100644 index 0000000..9c18d71 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_saferef.py @@ -0,0 +1,94 @@ +from __future__ import absolute_import + +from celery.five import range +from celery.utils.dispatch.saferef import safe_ref +from celery.tests.case import Case + + +class Class1(object): + + def x(self): + pass + + +def fun(obj): + pass + + +class Class2(object): + + def __call__(self, obj): + pass + + +class SaferefTests(Case): + + def setUp(self): + ts = [] + ss = [] + for x in range(5000): + t = Class1() + ts.append(t) + s = safe_ref(t.x, self._closure) + ss.append(s) + ts.append(fun) + ss.append(safe_ref(fun, self._closure)) + for x in range(30): + t = Class2() + ts.append(t) + s = safe_ref(t, self._closure) + ss.append(s) + self.ts = ts + self.ss = ss + self.closureCount = 0 + + def tearDown(self): + del self.ts + del self.ss + + def test_in(self): + """test_in + + Test the "in" operator for safe references (cmp) + + """ + for t in self.ts[:50]: + self.assertTrue(safe_ref(t.x) in self.ss) + + def test_valid(self): + """test_value + + Test that the references are valid (return instance methods) + + """ + for s in self.ss: + self.assertTrue(s()) + + def test_shortcircuit(self): + """test_shortcircuit + + Test that creation short-circuits to reuse existing references + + """ + sd = {} + for s in self.ss: + sd[s] = 1 + for t in self.ts: + if hasattr(t, 'x'): + self.assertIn(safe_ref(t.x), sd) + else: + self.assertIn(safe_ref(t), sd) + + def test_representation(self): + """test_representation + + Test that the reference object's representation works + + XXX Doesn't currently check the results, just that no error + is raised + """ + repr(self.ss[-1]) + + def _closure(self, ref): + """Dumb utility mechanism to increment deletion counter""" + self.closureCount += 1 diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_serialization.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_serialization.py new file mode 100644 index 0000000..53dfdad --- /dev/null +++ 
b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_serialization.py @@ -0,0 +1,42 @@ +from __future__ import absolute_import + +import sys + +from celery.utils.serialization import ( + UnpickleableExceptionWrapper, + get_pickleable_etype, +) + +from celery.tests.case import Case, mask_modules + + +class test_AAPickle(Case): + + def test_no_cpickle(self): + prev = sys.modules.pop('celery.utils.serialization', None) + try: + with mask_modules('cPickle'): + from celery.utils.serialization import pickle + import pickle as orig_pickle + self.assertIs(pickle.dumps, orig_pickle.dumps) + finally: + sys.modules['celery.utils.serialization'] = prev + + +class test_UnpickleExceptionWrapper(Case): + + def test_init(self): + x = UnpickleableExceptionWrapper('foo', 'Bar', [10, lambda x: x]) + self.assertTrue(x.exc_args) + self.assertEqual(len(x.exc_args), 2) + + +class test_get_pickleable_etype(Case): + + def test_get_pickleable_etype(self): + + class Unpickleable(Exception): + def __reduce__(self): + raise ValueError('foo') + + self.assertIs(get_pickleable_etype(Unpickleable), Exception) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_sysinfo.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_sysinfo.py new file mode 100644 index 0000000..4cd32c7 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_sysinfo.py @@ -0,0 +1,33 @@ +from __future__ import absolute_import + +import os + +from celery.utils.sysinfo import load_average, df + +from celery.tests.case import Case, SkipTest, patch + + +class test_load_average(Case): + + def test_avg(self): + if not hasattr(os, 'getloadavg'): + raise SkipTest('getloadavg not available') + with patch('os.getloadavg') as getloadavg: + getloadavg.return_value = 0.54736328125, 0.6357421875, 0.69921875 + l = load_average() + self.assertTrue(l) + self.assertEqual(l, (0.55, 0.64, 0.7)) + + +class test_df(Case): + + def test_df(self): + try: + from posix import statvfs_result # noqa + except ImportError: + raise SkipTest('statvfs not available') + x = df('/') + self.assertTrue(x.total_blocks) + self.assertTrue(x.available) + self.assertTrue(x.capacity) + self.assertTrue(x.stat) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_term.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_term.py new file mode 100644 index 0000000..1bd7e43 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_term.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import, unicode_literals + +import sys + +from celery.utils import term +from celery.utils.term import colored, fg +from celery.five import text_t + +from celery.tests.case import Case, SkipTest + + +class test_colored(Case): + + def setUp(self): + if sys.platform == 'win32': + raise SkipTest('Colors not supported on Windows') + + self._prev_encoding = sys.getdefaultencoding + + def getdefaultencoding(): + return 'utf-8' + + sys.getdefaultencoding = getdefaultencoding + + def tearDown(self): + sys.getdefaultencoding = self._prev_encoding + + def test_colors(self): + colors = ( + ('black', term.BLACK), + ('red', term.RED), + ('green', term.GREEN), + ('yellow', term.YELLOW), + ('blue', term.BLUE), + ('magenta', term.MAGENTA), + ('cyan', term.CYAN), + ('white', term.WHITE), + ) + + for name, key in colors: + self.assertIn(fg(30 + key), str(colored().names[name]('foo'))) + + self.assertTrue(str(colored().bold('f'))) + self.assertTrue(str(colored().underline('f'))) 
+ self.assertTrue(str(colored().blink('f'))) + self.assertTrue(str(colored().reverse('f'))) + self.assertTrue(str(colored().bright('f'))) + self.assertTrue(str(colored().ired('f'))) + self.assertTrue(str(colored().igreen('f'))) + self.assertTrue(str(colored().iyellow('f'))) + self.assertTrue(str(colored().iblue('f'))) + self.assertTrue(str(colored().imagenta('f'))) + self.assertTrue(str(colored().icyan('f'))) + self.assertTrue(str(colored().iwhite('f'))) + self.assertTrue(str(colored().reset('f'))) + + self.assertTrue(text_t(colored().green('∂bar'))) + + self.assertTrue( + colored().red('éefoo') + colored().green('∂bar')) + + self.assertEqual( + colored().red('foo').no_color(), 'foo') + + self.assertTrue( + repr(colored().blue('åfoo'))) + + self.assertIn("''", repr(colored())) + + c = colored() + s = c.red('foo', c.blue('bar'), c.green('baz')) + self.assertTrue(s.no_color()) + + c._fold_no_color(s, 'øfoo') + c._fold_no_color('fooå', s) + + c = colored().red('åfoo') + self.assertEqual( + c._add(c, 'baræ'), + '\x1b[1;31m\xe5foo\x1b[0mbar\xe6', + ) + + c2 = colored().blue('ƒƒz') + c3 = c._add(c, c2) + self.assertEqual( + c3, + '\x1b[1;31m\xe5foo\x1b[0m\x1b[1;34m\u0192\u0192z\x1b[0m', + ) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_text.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_text.py new file mode 100644 index 0000000..383bdb6 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_text.py @@ -0,0 +1,88 @@ +from __future__ import absolute_import + +from celery.utils.text import ( + indent, + ensure_2lines, + abbr, + truncate, + abbrtask, + pretty, +) +from celery.tests.case import AppCase, Case + +RANDTEXT = """\ +The quick brown +fox jumps +over the +lazy dog\ +""" + +RANDTEXT_RES = """\ + The quick brown + fox jumps + over the + lazy dog\ +""" + +QUEUES = { + 'queue1': { + 'exchange': 'exchange1', + 'exchange_type': 'type1', + 'routing_key': 'bind1', + }, + 'queue2': { + 'exchange': 'exchange2', + 'exchange_type': 'type2', + 'routing_key': 'bind2', + }, +} + + +QUEUE_FORMAT1 = '.> queue1 exchange=exchange1(type1) key=bind1' +QUEUE_FORMAT2 = '.> queue2 exchange=exchange2(type2) key=bind2' + + +class test_Info(AppCase): + + def test_textindent(self): + self.assertEqual(indent(RANDTEXT, 4), RANDTEXT_RES) + + def test_format_queues(self): + self.app.amqp.queues = self.app.amqp.Queues(QUEUES) + self.assertEqual(sorted(self.app.amqp.queues.format().split('\n')), + sorted([QUEUE_FORMAT1, QUEUE_FORMAT2])) + + def test_ensure_2lines(self): + self.assertEqual( + len(ensure_2lines('foo\nbar\nbaz\n').splitlines()), 3, + ) + self.assertEqual( + len(ensure_2lines('foo\nbar').splitlines()), 2, + ) + + +class test_utils(Case): + + def test_truncate_text(self): + self.assertEqual(truncate('ABCDEFGHI', 3), 'ABC...') + self.assertEqual(truncate('ABCDEFGHI', 10), 'ABCDEFGHI') + + def test_abbr(self): + self.assertEqual(abbr(None, 3), '???') + self.assertEqual(abbr('ABCDEFGHI', 6), 'ABC...') + self.assertEqual(abbr('ABCDEFGHI', 20), 'ABCDEFGHI') + self.assertEqual(abbr('ABCDEFGHI', 6, None), 'ABCDEF') + + def test_abbrtask(self): + self.assertEqual(abbrtask(None, 3), '???') + self.assertEqual( + abbrtask('feeds.tasks.refresh', 10), + '[.]refresh', + ) + self.assertEqual( + abbrtask('feeds.tasks.refresh', 30), + 'feeds.tasks.refresh', + ) + + def test_pretty(self): + self.assertTrue(pretty(('a', 'b', 'c'))) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_threads.py 
b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_threads.py new file mode 100644 index 0000000..b7f9c43 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_threads.py @@ -0,0 +1,109 @@ +from __future__ import absolute_import + +from celery.utils.threads import ( + _LocalStack, + _FastLocalStack, + LocalManager, + Local, + bgThread, +) + +from celery.tests.case import Case, override_stdouts, patch + + +class test_bgThread(Case): + + def test_crash(self): + + class T(bgThread): + + def body(self): + raise KeyError() + + with patch('os._exit') as _exit: + with override_stdouts(): + _exit.side_effect = ValueError() + t = T() + with self.assertRaises(ValueError): + t.run() + _exit.assert_called_with(1) + + def test_interface(self): + x = bgThread() + with self.assertRaises(NotImplementedError): + x.body() + + +class test_Local(Case): + + def test_iter(self): + x = Local() + x.foo = 'bar' + ident = x.__ident_func__() + self.assertIn((ident, {'foo': 'bar'}), list(iter(x))) + + delattr(x, 'foo') + self.assertNotIn((ident, {'foo': 'bar'}), list(iter(x))) + with self.assertRaises(AttributeError): + delattr(x, 'foo') + + self.assertIsNotNone(x(lambda: 'foo')) + + +class test_LocalStack(Case): + + def test_stack(self): + x = _LocalStack() + self.assertIsNone(x.pop()) + x.__release_local__() + ident = x.__ident_func__ + x.__ident_func__ = ident + + with self.assertRaises(RuntimeError): + x()[0] + + x.push(['foo']) + self.assertEqual(x()[0], 'foo') + x.pop() + with self.assertRaises(RuntimeError): + x()[0] + + +class test_FastLocalStack(Case): + + def test_stack(self): + x = _FastLocalStack() + x.push(['foo']) + x.push(['bar']) + self.assertEqual(x.top, ['bar']) + self.assertEqual(len(x), 2) + x.pop() + self.assertEqual(x.top, ['foo']) + x.pop() + self.assertIsNone(x.top) + + +class test_LocalManager(Case): + + def test_init(self): + x = LocalManager() + self.assertListEqual(x.locals, []) + self.assertTrue(x.ident_func) + + def ident(): + return 1 + + loc = Local() + x = LocalManager([loc], ident_func=ident) + self.assertListEqual(x.locals, [loc]) + x = LocalManager(loc, ident_func=ident) + self.assertListEqual(x.locals, [loc]) + self.assertIs(x.ident_func, ident) + self.assertIs(x.locals[0].__ident_func__, ident) + self.assertEqual(x.get_ident(), 1) + + with patch('celery.utils.threads.release_local') as release: + x.cleanup() + release.assert_called_with(loc) + + self.assertTrue(repr(x)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_timer2.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_timer2.py new file mode 100644 index 0000000..cb18c21 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_timer2.py @@ -0,0 +1,187 @@ +from __future__ import absolute_import + +import sys +import time + +import celery.utils.timer2 as timer2 + +from celery.tests.case import Case, Mock, patch +from kombu.tests.case import redirect_stdouts + + +class test_Entry(Case): + + def test_call(self): + scratch = [None] + + def timed(x, y, moo='foo'): + scratch[0] = (x, y, moo) + + tref = timer2.Entry(timed, (4, 4), {'moo': 'baz'}) + tref() + + self.assertTupleEqual(scratch[0], (4, 4, 'baz')) + + def test_cancel(self): + tref = timer2.Entry(lambda x: x, (1, ), {}) + tref.cancel() + self.assertTrue(tref.cancelled) + + def test_repr(self): + tref = timer2.Entry(lambda x: x(1, ), {}) + self.assertTrue(repr(tref)) + + +class test_Schedule(Case): + + def test_supports_Timer_interface(self): + x = timer2.Schedule() 
+ x.stop() + + tref = Mock() + x.cancel(tref) + tref.cancel.assert_called_with() + + self.assertIs(x.schedule, x) + + def test_handle_error(self): + from datetime import datetime + scratch = [None] + + def on_error(exc_info): + scratch[0] = exc_info + + s = timer2.Schedule(on_error=on_error) + + with patch('kombu.async.timer.to_timestamp') as tot: + tot.side_effect = OverflowError() + s.enter_at(timer2.Entry(lambda: None, (), {}), + eta=datetime.now()) + s.enter_at(timer2.Entry(lambda: None, (), {}), eta=None) + s.on_error = None + with self.assertRaises(OverflowError): + s.enter_at(timer2.Entry(lambda: None, (), {}), + eta=datetime.now()) + exc = scratch[0] + self.assertIsInstance(exc, OverflowError) + + +class test_Timer(Case): + + def test_enter_after(self): + t = timer2.Timer() + try: + done = [False] + + def set_done(): + done[0] = True + + t.call_after(0.3, set_done) + mss = 0 + while not done[0]: + if mss >= 2.0: + raise Exception('test timed out') + time.sleep(0.1) + mss += 0.1 + finally: + t.stop() + + def test_exit_after(self): + t = timer2.Timer() + t.call_after = Mock() + t.exit_after(0.3, priority=10) + t.call_after.assert_called_with(0.3, sys.exit, 10) + + def test_ensure_started_not_started(self): + t = timer2.Timer() + t.running = True + t.start = Mock() + t.ensure_started() + self.assertFalse(t.start.called) + + def test_call_repeatedly(self): + t = timer2.Timer() + try: + t.schedule.enter_after = Mock() + + myfun = Mock() + myfun.__name__ = 'myfun' + t.call_repeatedly(0.03, myfun) + + self.assertEqual(t.schedule.enter_after.call_count, 1) + args1, _ = t.schedule.enter_after.call_args_list[0] + sec1, tref1, _ = args1 + self.assertEqual(sec1, 0.03) + tref1() + + self.assertEqual(t.schedule.enter_after.call_count, 2) + args2, _ = t.schedule.enter_after.call_args_list[1] + sec2, tref2, _ = args2 + self.assertEqual(sec2, 0.03) + tref2.cancelled = True + tref2() + + self.assertEqual(t.schedule.enter_after.call_count, 2) + finally: + t.stop() + + @patch('kombu.async.timer.logger') + def test_apply_entry_error_handled(self, logger): + t = timer2.Timer() + t.schedule.on_error = None + + fun = Mock() + fun.side_effect = ValueError() + + t.schedule.apply_entry(fun) + self.assertTrue(logger.error.called) + + @redirect_stdouts + def test_apply_entry_error_not_handled(self, stdout, stderr): + t = timer2.Timer() + t.schedule.on_error = Mock() + + fun = Mock() + fun.side_effect = ValueError() + t.schedule.apply_entry(fun) + fun.assert_called_with() + self.assertFalse(stderr.getvalue()) + + @patch('os._exit') + def test_thread_crash(self, _exit): + t = timer2.Timer() + t._next_entry = Mock() + t._next_entry.side_effect = OSError(131) + t.run() + _exit.assert_called_with(1) + + def test_gc_race_lost(self): + t = timer2.Timer() + t._is_stopped.set = Mock() + t._is_stopped.set.side_effect = TypeError() + + t._is_shutdown.set() + t.run() + t._is_stopped.set.assert_called_with() + + def test_to_timestamp(self): + self.assertIs(timer2.to_timestamp(3.13), 3.13) + + def test_test_enter(self): + t = timer2.Timer() + t._do_enter = Mock() + e = Mock() + t.enter(e, 13, 0) + t._do_enter.assert_called_with('enter_at', e, 13, priority=0) + + def test_test_enter_after(self): + t = timer2.Timer() + t._do_enter = Mock() + t.enter_after() + t._do_enter.assert_called_with('enter_after') + + def test_cancel(self): + t = timer2.Timer() + tref = Mock() + t.cancel(tref) + tref.cancel.assert_called_with() diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_timeutils.py 
b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_timeutils.py new file mode 100644 index 0000000..2258d06 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_timeutils.py @@ -0,0 +1,267 @@ +from __future__ import absolute_import + +import pytz + +from datetime import datetime, timedelta, tzinfo +from pytz import AmbiguousTimeError + +from celery.utils.timeutils import ( + delta_resolution, + humanize_seconds, + maybe_iso8601, + maybe_timedelta, + timedelta_seconds, + timezone, + rate, + remaining, + make_aware, + maybe_make_aware, + localize, + LocalTimezone, + ffwd, + utcoffset, +) +from celery.utils.iso8601 import parse_iso8601 +from celery.tests.case import Case, Mock, patch + + +class test_LocalTimezone(Case): + + def test_daylight(self): + with patch('celery.utils.timeutils._time') as time: + time.timezone = 3600 + time.daylight = False + x = LocalTimezone() + self.assertEqual(x.STDOFFSET, timedelta(seconds=-3600)) + self.assertEqual(x.DSTOFFSET, x.STDOFFSET) + time.daylight = True + time.altzone = 3600 + y = LocalTimezone() + self.assertEqual(y.STDOFFSET, timedelta(seconds=-3600)) + self.assertEqual(y.DSTOFFSET, timedelta(seconds=-3600)) + + self.assertTrue(repr(y)) + + y._isdst = Mock() + y._isdst.return_value = True + self.assertTrue(y.utcoffset(datetime.now())) + self.assertFalse(y.dst(datetime.now())) + y._isdst.return_value = False + self.assertTrue(y.utcoffset(datetime.now())) + self.assertFalse(y.dst(datetime.now())) + + self.assertTrue(y.tzname(datetime.now())) + + +class test_iso8601(Case): + + def test_parse_with_timezone(self): + d = datetime.utcnow().replace(tzinfo=pytz.utc) + self.assertEqual(parse_iso8601(d.isoformat()), d) + # 2013-06-07T20:12:51.775877+00:00 + iso = d.isoformat() + iso1 = iso.replace('+00:00', '-01:00') + d1 = parse_iso8601(iso1) + self.assertEqual(d1.tzinfo._minutes, -60) + iso2 = iso.replace('+00:00', '+01:00') + d2 = parse_iso8601(iso2) + self.assertEqual(d2.tzinfo._minutes, +60) + iso3 = iso.replace('+00:00', 'Z') + d3 = parse_iso8601(iso3) + self.assertEqual(d3.tzinfo, pytz.UTC) + + +class test_timeutils(Case): + + def test_delta_resolution(self): + D = delta_resolution + dt = datetime(2010, 3, 30, 11, 50, 58, 41065) + deltamap = ((timedelta(days=2), datetime(2010, 3, 30, 0, 0)), + (timedelta(hours=2), datetime(2010, 3, 30, 11, 0)), + (timedelta(minutes=2), datetime(2010, 3, 30, 11, 50)), + (timedelta(seconds=2), dt)) + for delta, shoulda in deltamap: + self.assertEqual(D(dt, delta), shoulda) + + def test_timedelta_seconds(self): + deltamap = ((timedelta(seconds=1), 1), + (timedelta(seconds=27), 27), + (timedelta(minutes=3), 3 * 60), + (timedelta(hours=4), 4 * 60 * 60), + (timedelta(days=3), 3 * 86400)) + for delta, seconds in deltamap: + self.assertEqual(timedelta_seconds(delta), seconds) + + def test_timedelta_seconds_returns_0_on_negative_time(self): + delta = timedelta(days=-2) + self.assertEqual(timedelta_seconds(delta), 0) + + def test_humanize_seconds(self): + t = ((4 * 60 * 60 * 24, '4.00 days'), + (1 * 60 * 60 * 24, '1.00 day'), + (4 * 60 * 60, '4.00 hours'), + (1 * 60 * 60, '1.00 hour'), + (4 * 60, '4.00 minutes'), + (1 * 60, '1.00 minute'), + (4, '4.00 seconds'), + (1, '1.00 second'), + (4.3567631221, '4.36 seconds'), + (0, 'now')) + + for seconds, human in t: + self.assertEqual(humanize_seconds(seconds), human) + + self.assertEqual(humanize_seconds(4, prefix='about '), + 'about 4.00 seconds') + + def test_maybe_iso8601_datetime(self): + now = datetime.now() + 
self.assertIs(maybe_iso8601(now), now) + + def test_maybe_timedelta(self): + D = maybe_timedelta + + for i in (30, 30.6): + self.assertEqual(D(i), timedelta(seconds=i)) + + self.assertEqual(D(timedelta(days=2)), timedelta(days=2)) + + def test_remaining_relative(self): + remaining(datetime.utcnow(), timedelta(hours=1), relative=True) + + +class test_timezone(Case): + + def test_get_timezone_with_pytz(self): + self.assertTrue(timezone.get_timezone('UTC')) + + def test_tz_or_local(self): + self.assertEqual(timezone.tz_or_local(), timezone.local) + self.assertTrue(timezone.tz_or_local(timezone.utc)) + + def test_to_local(self): + self.assertTrue( + timezone.to_local(make_aware(datetime.utcnow(), timezone.utc)), + ) + self.assertTrue( + timezone.to_local(datetime.utcnow()) + ) + + def test_to_local_fallback(self): + self.assertTrue( + timezone.to_local_fallback( + make_aware(datetime.utcnow(), timezone.utc)), + ) + self.assertTrue( + timezone.to_local_fallback(datetime.utcnow()) + ) + + +class test_make_aware(Case): + + def test_tz_without_localize(self): + tz = tzinfo() + self.assertFalse(hasattr(tz, 'localize')) + wtz = make_aware(datetime.utcnow(), tz) + self.assertEqual(wtz.tzinfo, tz) + + def test_when_has_localize(self): + + class tzz(tzinfo): + raises = False + + def localize(self, dt, is_dst=None): + self.localized = True + if self.raises and is_dst is None: + self.raised = True + raise AmbiguousTimeError() + return 1 # needed by min() in Python 3 (None not hashable) + + tz = tzz() + make_aware(datetime.utcnow(), tz) + self.assertTrue(tz.localized) + + tz2 = tzz() + tz2.raises = True + make_aware(datetime.utcnow(), tz2) + self.assertTrue(tz2.localized) + self.assertTrue(tz2.raised) + + def test_maybe_make_aware(self): + aware = datetime.utcnow().replace(tzinfo=timezone.utc) + self.assertTrue(maybe_make_aware(aware), timezone.utc) + naive = datetime.utcnow() + self.assertTrue(maybe_make_aware(naive)) + + +class test_localize(Case): + + def test_tz_without_normalize(self): + tz = tzinfo() + self.assertFalse(hasattr(tz, 'normalize')) + self.assertTrue(localize(make_aware(datetime.utcnow(), tz), tz)) + + def test_when_has_normalize(self): + + class tzz(tzinfo): + raises = None + + def normalize(self, dt, **kwargs): + self.normalized = True + if self.raises and kwargs and kwargs.get('is_dst') is None: + self.raised = True + raise self.raises + return 1 # needed by min() in Python 3 (None not hashable) + + tz = tzz() + localize(make_aware(datetime.utcnow(), tz), tz) + self.assertTrue(tz.normalized) + + tz2 = tzz() + tz2.raises = AmbiguousTimeError() + localize(make_aware(datetime.utcnow(), tz2), tz2) + self.assertTrue(tz2.normalized) + self.assertTrue(tz2.raised) + + tz3 = tzz() + tz3.raises = TypeError() + localize(make_aware(datetime.utcnow(), tz3), tz3) + self.assertTrue(tz3.normalized) + self.assertTrue(tz3.raised) + + +class test_rate_limit_string(Case): + + def test_conversion(self): + self.assertEqual(rate(999), 999) + self.assertEqual(rate(7.5), 7.5) + self.assertEqual(rate('2.5/s'), 2.5) + self.assertEqual(rate('1456/s'), 1456) + self.assertEqual(rate('100/m'), + 100 / 60.0) + self.assertEqual(rate('10/h'), + 10 / 60.0 / 60.0) + + for zero in (0, None, '0', '0/m', '0/h', '0/s', '0.0/s'): + self.assertEqual(rate(zero), 0) + + +class test_ffwd(Case): + + def test_repr(self): + x = ffwd(year=2012) + self.assertTrue(repr(x)) + + def test_radd_with_unknown_gives_NotImplemented(self): + x = ffwd(year=2012) + self.assertEqual(x.__radd__(object()), NotImplemented) + + +class 
test_utcoffset(Case): + + def test_utcoffset(self): + with patch('celery.utils.timeutils._time') as _time: + _time.daylight = True + self.assertIsNotNone(utcoffset()) + _time.daylight = False + self.assertIsNotNone(utcoffset()) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_utils.py b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_utils.py new file mode 100644 index 0000000..2837ad6 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/utils/test_utils.py @@ -0,0 +1,108 @@ +from __future__ import absolute_import + +import pytz + +from datetime import datetime, date, time, timedelta + +from kombu import Queue + +from celery.utils import ( + chunks, + is_iterable, + cached_property, + warn_deprecated, + worker_direct, + gen_task_name, + jsonify, +) +from celery.tests.case import Case, Mock, patch + + +def double(x): + return x * 2 + + +class test_worker_direct(Case): + + def test_returns_if_queue(self): + q = Queue('foo') + self.assertIs(worker_direct(q), q) + + +class test_gen_task_name(Case): + + def test_no_module(self): + app = Mock() + app.name == '__main__' + self.assertTrue(gen_task_name(app, 'foo', 'axsadaewe')) + + +class test_jsonify(Case): + + def test_simple(self): + self.assertTrue(jsonify(Queue('foo'))) + self.assertTrue(jsonify(['foo', 'bar', 'baz'])) + self.assertTrue(jsonify({'foo': 'bar'})) + self.assertTrue(jsonify(datetime.utcnow())) + self.assertTrue(jsonify(datetime.utcnow().replace(tzinfo=pytz.utc))) + self.assertTrue(jsonify(datetime.utcnow().replace(microsecond=0))) + self.assertTrue(jsonify(date(2012, 1, 1))) + self.assertTrue(jsonify(time(hour=1, minute=30))) + self.assertTrue(jsonify(time(hour=1, minute=30, microsecond=3))) + self.assertTrue(jsonify(timedelta(seconds=30))) + self.assertTrue(jsonify(10)) + self.assertTrue(jsonify(10.3)) + self.assertTrue(jsonify('hello')) + + with self.assertRaises(ValueError): + jsonify(object()) + + +class test_chunks(Case): + + def test_chunks(self): + + # n == 2 + x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2) + self.assertListEqual( + list(x), + [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]], + ) + + # n == 3 + x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3) + self.assertListEqual( + list(x), + [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]], + ) + + # n == 2 (exact) + x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), 2) + self.assertListEqual( + list(x), + [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]], + ) + + +class test_utils(Case): + + def test_is_iterable(self): + for a in 'f', ['f'], ('f', ), {'f': 'f'}: + self.assertTrue(is_iterable(a)) + for b in object(), 1: + self.assertFalse(is_iterable(b)) + + def test_cached_property(self): + + def fun(obj): + return fun.value + + x = cached_property(fun) + self.assertIs(x.__get__(None), x) + self.assertIs(x.__set__(None, None), x) + self.assertIs(x.__delete__(None), x) + + @patch('warnings.warn') + def test_warn_deprecated(self, warn): + warn_deprecated('Foo') + self.assertTrue(warn.called) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_autoreload.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_autoreload.py new file mode 100644 index 0000000..e61b330 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_autoreload.py @@ -0,0 +1,328 @@ +from 
__future__ import absolute_import + +import errno +import select +import sys + +from time import time + +from celery.worker import autoreload +from celery.worker.autoreload import ( + WorkerComponent, + file_hash, + BaseMonitor, + StatMonitor, + KQueueMonitor, + InotifyMonitor, + default_implementation, + Autoreloader, +) + +from celery.tests.case import AppCase, Case, Mock, SkipTest, patch, mock_open + + +class test_WorkerComponent(AppCase): + + def test_create_threaded(self): + w = Mock() + w.use_eventloop = False + x = WorkerComponent(w) + x.instantiate = Mock() + r = x.create(w) + x.instantiate.assert_called_with(w.autoreloader_cls, w) + self.assertIs(r, w.autoreloader) + + @patch('select.kevent', create=True) + @patch('select.kqueue', create=True) + @patch('kombu.utils.eventio.kqueue') + def test_create_ev(self, kq, kqueue, kevent): + w = Mock() + w.use_eventloop = True + x = WorkerComponent(w) + x.instantiate = Mock() + r = x.create(w) + x.instantiate.assert_called_with(w.autoreloader_cls, w) + x.register_with_event_loop(w, w.hub) + self.assertIsNone(r) + w.hub.on_close.add.assert_called_with( + w.autoreloader.on_event_loop_close, + ) + + +class test_file_hash(Case): + + def test_hash(self): + with mock_open() as a: + a.write('the quick brown fox\n') + a.seek(0) + A = file_hash('foo') + with mock_open() as b: + b.write('the quick brown bar\n') + b.seek(0) + B = file_hash('bar') + self.assertNotEqual(A, B) + + +class test_BaseMonitor(Case): + + def test_start_stop_on_change(self): + x = BaseMonitor(['a', 'b']) + + with self.assertRaises(NotImplementedError): + x.start() + x.stop() + x.on_change([]) + x._on_change = Mock() + x.on_change('foo') + x._on_change.assert_called_with('foo') + + +class test_StatMonitor(Case): + + @patch('os.stat') + def test_start(self, stat): + + class st(object): + st_mtime = time() + stat.return_value = st() + x = StatMonitor(['a', 'b']) + + def on_is_set(): + if x.shutdown_event.is_set.call_count > 3: + return True + return False + x.shutdown_event = Mock() + x.shutdown_event.is_set.side_effect = on_is_set + + x.start() + x.shutdown_event = Mock() + stat.side_effect = OSError() + x.start() + + @patch('os.stat') + def test_mtime_stat_raises(self, stat): + stat.side_effect = ValueError() + x = StatMonitor(['a', 'b']) + x._mtime('a') + + +class test_KQueueMonitor(Case): + + @patch('select.kqueue', create=True) + @patch('os.close') + def test_stop(self, close, kqueue): + x = KQueueMonitor(['a', 'b']) + x.poller = Mock() + x.filemap['a'] = 10 + x.stop() + x.poller.close.assert_called_with() + close.assert_called_with(10) + + close.side_effect = OSError() + close.side_effect.errno = errno.EBADF + x.stop() + + def test_register_with_event_loop(self): + from kombu.utils import eventio + if eventio.kqueue is None: + raise SkipTest('version of kombu does not work with pypy') + x = KQueueMonitor(['a', 'b']) + hub = Mock(name='hub') + x.add_events = Mock(name='add_events()') + x.register_with_event_loop(hub) + x.add_events.assert_called_with(x._kq) + self.assertEqual( + x._kq.on_file_change, + x.handle_event, + ) + + def test_on_event_loop_close(self): + x = KQueueMonitor(['a', 'b']) + x.close = Mock() + x._kq = Mock(name='_kq') + x.on_event_loop_close(Mock(name='hub')) + x.close.assert_called_with(x._kq) + + def test_handle_event(self): + x = KQueueMonitor(['a', 'b']) + x.on_change = Mock() + eA = Mock() + eA.ident = 'a' + eB = Mock() + eB.ident = 'b' + x.fdmap = {'a': 'A', 'b': 'B'} + x.handle_event([eA, eB]) + x.on_change.assert_called_with(['A', 'B']) + + 
@patch('kombu.utils.eventio.kqueue', create=True) + @patch('kombu.utils.eventio.kevent', create=True) + @patch('os.open') + @patch('select.kqueue', create=True) + def test_start(self, _kq, osopen, kevent, kqueue): + from kombu.utils import eventio + prev_poll, eventio.poll = eventio.poll, kqueue + prev = {} + flags = ['KQ_FILTER_VNODE', 'KQ_EV_ADD', 'KQ_EV_ENABLE', + 'KQ_EV_CLEAR', 'KQ_NOTE_WRITE', 'KQ_NOTE_EXTEND'] + for i, flag in enumerate(flags): + prev[flag] = getattr(eventio, flag, None) + if not prev[flag]: + setattr(eventio, flag, i) + try: + kq = kqueue.return_value = Mock() + + class ev(object): + ident = 10 + filter = eventio.KQ_FILTER_VNODE + fflags = eventio.KQ_NOTE_WRITE + kq.control.return_value = [ev()] + x = KQueueMonitor(['a']) + osopen.return_value = 10 + calls = [0] + + def on_is_set(): + calls[0] += 1 + if calls[0] > 2: + return True + return False + x.shutdown_event = Mock() + x.shutdown_event.is_set.side_effect = on_is_set + x.start() + finally: + for flag in flags: + if prev[flag]: + setattr(eventio, flag, prev[flag]) + else: + delattr(eventio, flag) + eventio.poll = prev_poll + + +class test_InotifyMonitor(Case): + + @patch('celery.worker.autoreload.pyinotify') + def test_start(self, inotify): + x = InotifyMonitor(['a']) + inotify.IN_MODIFY = 1 + inotify.IN_ATTRIB = 2 + x.start() + + inotify.WatchManager.side_effect = ValueError() + with self.assertRaises(ValueError): + x.start() + x.stop() + + x._on_change = None + x.process_(Mock()) + x._on_change = Mock() + x.process_(Mock()) + self.assertTrue(x._on_change.called) + + +class test_default_implementation(Case): + + @patch('select.kqueue', create=True) + @patch('kombu.utils.eventio.kqueue', create=True) + def test_kqueue(self, kq, kqueue): + self.assertEqual(default_implementation(), 'kqueue') + + @patch('celery.worker.autoreload.pyinotify') + def test_inotify(self, pyinotify): + kq = getattr(select, 'kqueue', None) + try: + delattr(select, 'kqueue') + except AttributeError: + pass + platform, sys.platform = sys.platform, 'linux' + try: + self.assertEqual(default_implementation(), 'inotify') + ino, autoreload.pyinotify = autoreload.pyinotify, None + try: + self.assertEqual(default_implementation(), 'stat') + finally: + autoreload.pyinotify = ino + finally: + if kq: + select.kqueue = kq + sys.platform = platform + + +class test_Autoreloader(AppCase): + + def test_register_with_event_loop(self): + x = Autoreloader(Mock(), modules=[__name__]) + hub = Mock() + x._monitor = None + x.on_init = Mock() + + def se(*args, **kwargs): + x._monitor = Mock() + x.on_init.side_effect = se + + x.register_with_event_loop(hub) + x.on_init.assert_called_with() + x._monitor.register_with_event_loop.assert_called_with(hub) + + x._monitor.register_with_event_loop.reset_mock() + x.register_with_event_loop(hub) + x._monitor.register_with_event_loop.assert_called_with(hub) + + def test_on_event_loop_close(self): + x = Autoreloader(Mock(), modules=[__name__]) + hub = Mock() + x._monitor = Mock() + x.on_event_loop_close(hub) + x._monitor.on_event_loop_close.assert_called_with(hub) + x._monitor = None + x.on_event_loop_close(hub) + + @patch('celery.worker.autoreload.file_hash') + def test_start(self, fhash): + x = Autoreloader(Mock(), modules=[__name__]) + x.Monitor = Mock() + mon = x.Monitor.return_value = Mock() + mon.start.side_effect = OSError() + mon.start.side_effect.errno = errno.EINTR + x.body() + mon.start.side_effect.errno = errno.ENOENT + with self.assertRaises(OSError): + x.body() + mon.start.side_effect = None + x.body() + + 
@patch('celery.worker.autoreload.file_hash') + @patch('os.path.exists') + def test_maybe_modified(self, exists, fhash): + exists.return_value = True + fhash.return_value = 'abcd' + x = Autoreloader(Mock(), modules=[__name__]) + x._hashes = {} + x._hashes[__name__] = 'dcba' + self.assertTrue(x._maybe_modified(__name__)) + x._hashes[__name__] = 'abcd' + self.assertFalse(x._maybe_modified(__name__)) + exists.return_value = False + self.assertFalse(x._maybe_modified(__name__)) + + def test_on_change(self): + x = Autoreloader(Mock(), modules=[__name__]) + mm = x._maybe_modified = Mock(0) + mm.return_value = True + x._reload = Mock() + x.file_to_module[__name__] = __name__ + x.on_change([__name__]) + self.assertTrue(x._reload.called) + mm.return_value = False + x.on_change([__name__]) + + def test_reload(self): + x = Autoreloader(Mock(), modules=[__name__]) + x._reload([__name__]) + x.controller.reload.assert_called_with([__name__], reload=True) + + def test_stop(self): + x = Autoreloader(Mock(), modules=[__name__]) + x._monitor = None + x.stop() + x._monitor = Mock() + x.stop() + x._monitor.stop.assert_called_with() diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_autoscale.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_autoscale.py new file mode 100644 index 0000000..45ea488 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_autoscale.py @@ -0,0 +1,198 @@ +from __future__ import absolute_import + +import sys + +from celery.concurrency.base import BasePool +from celery.five import monotonic +from celery.worker import state +from celery.worker import autoscale +from celery.tests.case import AppCase, Mock, patch, sleepdeprived + + +class Object(object): + pass + + +class MockPool(BasePool): + shrink_raises_exception = False + shrink_raises_ValueError = False + + def __init__(self, *args, **kwargs): + super(MockPool, self).__init__(*args, **kwargs) + self._pool = Object() + self._pool._processes = self.limit + + def grow(self, n=1): + self._pool._processes += n + + def shrink(self, n=1): + if self.shrink_raises_exception: + raise KeyError('foo') + if self.shrink_raises_ValueError: + raise ValueError('foo') + self._pool._processes -= n + + @property + def num_processes(self): + return self._pool._processes + + +class test_WorkerComponent(AppCase): + + def test_register_with_event_loop(self): + parent = Mock(name='parent') + parent.autoscale = True + parent.consumer.on_task_message = set() + w = autoscale.WorkerComponent(parent) + self.assertIsNone(parent.autoscaler) + self.assertTrue(w.enabled) + + hub = Mock(name='hub') + w.create(parent) + w.register_with_event_loop(parent, hub) + self.assertIn( + parent.autoscaler.maybe_scale, + parent.consumer.on_task_message, + ) + hub.call_repeatedly.assert_called_with( + parent.autoscaler.keepalive, parent.autoscaler.maybe_scale, + ) + + parent.hub = hub + hub.on_init = [] + w.instantiate = Mock() + w.register_with_event_loop(parent, Mock(name='loop')) + self.assertTrue(parent.consumer.on_task_message) + + +class test_Autoscaler(AppCase): + + def setup(self): + self.pool = MockPool(3) + + def test_stop(self): + + class Scaler(autoscale.Autoscaler): + alive = True + joined = False + + def is_alive(self): + return self.alive + + def join(self, timeout=None): + self.joined = True + + worker = Mock(name='worker') + x = Scaler(self.pool, 10, 3, worker=worker) + x._is_stopped.set() + x.stop() + self.assertTrue(x.joined) + x.joined = False + x.alive = False + x.stop() + 
self.assertFalse(x.joined) + + @sleepdeprived(autoscale) + def test_body(self): + worker = Mock(name='worker') + x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) + x.body() + self.assertEqual(x.pool.num_processes, 3) + for i in range(20): + state.reserved_requests.add(i) + x.body() + x.body() + self.assertEqual(x.pool.num_processes, 10) + self.assertTrue(worker.consumer._update_prefetch_count.called) + state.reserved_requests.clear() + x.body() + self.assertEqual(x.pool.num_processes, 10) + x._last_action = monotonic() - 10000 + x.body() + self.assertEqual(x.pool.num_processes, 3) + self.assertTrue(worker.consumer._update_prefetch_count.called) + + def test_run(self): + + class Scaler(autoscale.Autoscaler): + scale_called = False + + def body(self): + self.scale_called = True + self._is_shutdown.set() + + worker = Mock(name='worker') + x = Scaler(self.pool, 10, 3, worker=worker) + x.run() + self.assertTrue(x._is_shutdown.isSet()) + self.assertTrue(x._is_stopped.isSet()) + self.assertTrue(x.scale_called) + + def test_shrink_raises_exception(self): + worker = Mock(name='worker') + x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) + x.scale_up(3) + x._last_action = monotonic() - 10000 + x.pool.shrink_raises_exception = True + x.scale_down(1) + + @patch('celery.worker.autoscale.debug') + def test_shrink_raises_ValueError(self, debug): + worker = Mock(name='worker') + x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) + x.scale_up(3) + x._last_action = monotonic() - 10000 + x.pool.shrink_raises_ValueError = True + x.scale_down(1) + self.assertTrue(debug.call_count) + + def test_update_and_force(self): + worker = Mock(name='worker') + x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) + self.assertEqual(x.processes, 3) + x.force_scale_up(5) + self.assertEqual(x.processes, 8) + x.update(5, None) + self.assertEqual(x.processes, 5) + x.force_scale_down(3) + self.assertEqual(x.processes, 2) + x.update(3, None) + self.assertEqual(x.processes, 3) + x.force_scale_down(1000) + self.assertEqual(x.min_concurrency, 0) + self.assertEqual(x.processes, 0) + x.force_scale_up(1000) + x.min_concurrency = 1 + x.force_scale_down(1) + + x.update(max=300, min=10) + x.update(max=300, min=2) + x.update(max=None, min=None) + + def test_info(self): + worker = Mock(name='worker') + x = autoscale.Autoscaler(self.pool, 10, 3, worker=worker) + info = x.info() + self.assertEqual(info['max'], 10) + self.assertEqual(info['min'], 3) + self.assertEqual(info['current'], 3) + + @patch('os._exit') + def test_thread_crash(self, _exit): + + class _Autoscaler(autoscale.Autoscaler): + + def body(self): + self._is_shutdown.set() + raise OSError('foo') + worker = Mock(name='worker') + x = _Autoscaler(self.pool, 10, 3, worker=worker) + + stderr = Mock() + p, sys.stderr = sys.stderr, stderr + try: + x.run() + finally: + sys.stderr = p + _exit.assert_called_with(1) + self.assertTrue(stderr.write.call_count) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_bootsteps.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_bootsteps.py new file mode 100644 index 0000000..522d263 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_bootsteps.py @@ -0,0 +1,338 @@ +from __future__ import absolute_import + +from celery import bootsteps + +from celery.tests.case import AppCase, Mock, patch + + +class test_StepFormatter(AppCase): + + def test_get_prefix(self): + f = bootsteps.StepFormatter() + s = Mock() + s.last = True + 
self.assertEqual(f._get_prefix(s), f.blueprint_prefix) + + s2 = Mock() + s2.last = False + s2.conditional = True + self.assertEqual(f._get_prefix(s2), f.conditional_prefix) + + s3 = Mock() + s3.last = s3.conditional = False + self.assertEqual(f._get_prefix(s3), '') + + def test_node(self): + f = bootsteps.StepFormatter() + f.draw_node = Mock() + step = Mock() + step.last = False + f.node(step, x=3) + f.draw_node.assert_called_with(step, f.node_scheme, {'x': 3}) + + step.last = True + f.node(step, x=3) + f.draw_node.assert_called_with(step, f.blueprint_scheme, {'x': 3}) + + def test_edge(self): + f = bootsteps.StepFormatter() + f.draw_edge = Mock() + a, b = Mock(), Mock() + a.last = True + f.edge(a, b, x=6) + f.draw_edge.assert_called_with(a, b, f.edge_scheme, { + 'x': 6, 'arrowhead': 'none', 'color': 'darkseagreen3', + }) + + a.last = False + f.edge(a, b, x=6) + f.draw_edge.assert_called_with(a, b, f.edge_scheme, { + 'x': 6, + }) + + +class test_Step(AppCase): + + class Def(bootsteps.StartStopStep): + name = 'test_Step.Def' + + def setup(self): + self.steps = [] + + def test_blueprint_name(self, bp='test_blueprint_name'): + + class X(bootsteps.Step): + blueprint = bp + name = 'X' + self.assertEqual(X.name, 'X') + + class Y(bootsteps.Step): + name = '%s.Y' % bp + self.assertEqual(Y.name, '%s.Y' % bp) + + def test_init(self): + self.assertTrue(self.Def(self)) + + def test_create(self): + self.Def(self).create(self) + + def test_include_if(self): + x = self.Def(self) + x.enabled = True + self.assertTrue(x.include_if(self)) + + x.enabled = False + self.assertFalse(x.include_if(self)) + + def test_instantiate(self): + self.assertIsInstance(self.Def(self).instantiate(self.Def, self), + self.Def) + + def test_include_when_enabled(self): + x = self.Def(self) + x.create = Mock() + x.create.return_value = 'George' + self.assertTrue(x.include(self)) + + self.assertEqual(x.obj, 'George') + x.create.assert_called_with(self) + + def test_include_when_disabled(self): + x = self.Def(self) + x.enabled = False + x.create = Mock() + + self.assertFalse(x.include(self)) + self.assertFalse(x.create.call_count) + + def test_repr(self): + x = self.Def(self) + self.assertTrue(repr(x)) + + +class test_ConsumerStep(AppCase): + + def test_interface(self): + step = bootsteps.ConsumerStep(self) + with self.assertRaises(NotImplementedError): + step.get_consumers(self) + + def test_start_stop_shutdown(self): + consumer = Mock() + self.connection = Mock() + + class Step(bootsteps.ConsumerStep): + + def get_consumers(self, c): + return [consumer] + + step = Step(self) + self.assertEqual(step.get_consumers(self), [consumer]) + + step.start(self) + consumer.consume.assert_called_with() + step.stop(self) + consumer.cancel.assert_called_with() + + step.shutdown(self) + consumer.channel.close.assert_called_with() + + def test_start_no_consumers(self): + self.connection = Mock() + + class Step(bootsteps.ConsumerStep): + + def get_consumers(self, c): + return () + + step = Step(self) + step.start(self) + + +class test_StartStopStep(AppCase): + + class Def(bootsteps.StartStopStep): + name = 'test_StartStopStep.Def' + + def setup(self): + self.steps = [] + + def test_start__stop(self): + x = self.Def(self) + x.create = Mock() + + # include creates the underlying object and sets + # its x.obj attribute to it, as well as appending + # it to the parent.steps list. 
+ x.include(self) + self.assertTrue(self.steps) + self.assertIs(self.steps[0], x) + + x.start(self) + x.obj.start.assert_called_with() + + x.stop(self) + x.obj.stop.assert_called_with() + + x.obj = None + self.assertIsNone(x.start(self)) + + def test_include_when_disabled(self): + x = self.Def(self) + x.enabled = False + x.include(self) + self.assertFalse(self.steps) + + def test_terminate(self): + x = self.Def(self) + x.create = Mock() + + x.include(self) + delattr(x.obj, 'terminate') + x.terminate(self) + x.obj.stop.assert_called_with() + + +class test_Blueprint(AppCase): + + class Blueprint(bootsteps.Blueprint): + name = 'test_Blueprint' + + def test_steps_added_to_unclaimed(self): + + class tnA(bootsteps.Step): + name = 'test_Blueprint.A' + + class tnB(bootsteps.Step): + name = 'test_Blueprint.B' + + class xxA(bootsteps.Step): + name = 'xx.A' + + class Blueprint(self.Blueprint): + default_steps = [tnA, tnB] + blueprint = Blueprint(app=self.app) + + self.assertIn(tnA, blueprint._all_steps()) + self.assertIn(tnB, blueprint._all_steps()) + self.assertNotIn(xxA, blueprint._all_steps()) + + def test_init(self): + blueprint = self.Blueprint(app=self.app) + self.assertIs(blueprint.app, self.app) + self.assertEqual(blueprint.name, 'test_Blueprint') + + def test_close__on_close_is_None(self): + blueprint = self.Blueprint(app=self.app) + blueprint.on_close = None + blueprint.send_all = Mock() + blueprint.close(1) + blueprint.send_all.assert_called_with( + 1, 'close', 'closing', reverse=False, + ) + + def test_send_all_with_None_steps(self): + parent = Mock() + blueprint = self.Blueprint(app=self.app) + parent.steps = [None, None, None] + blueprint.send_all(parent, 'close', 'Closing', reverse=False) + + def test_join_raises_IGNORE_ERRORS(self): + prev, bootsteps.IGNORE_ERRORS = bootsteps.IGNORE_ERRORS, (KeyError, ) + try: + blueprint = self.Blueprint(app=self.app) + blueprint.shutdown_complete = Mock() + blueprint.shutdown_complete.wait.side_effect = KeyError('luke') + blueprint.join(timeout=10) + blueprint.shutdown_complete.wait.assert_called_with(timeout=10) + finally: + bootsteps.IGNORE_ERRORS = prev + + def test_connect_with(self): + + class b1s1(bootsteps.Step): + pass + + class b1s2(bootsteps.Step): + last = True + + class b2s1(bootsteps.Step): + pass + + class b2s2(bootsteps.Step): + last = True + + b1 = self.Blueprint([b1s1, b1s2], app=self.app) + b2 = self.Blueprint([b2s1, b2s2], app=self.app) + b1.apply(Mock()) + b2.apply(Mock()) + b1.connect_with(b2) + + self.assertIn(b1s1, b1.graph) + self.assertIn(b2s1, b1.graph) + self.assertIn(b2s2, b1.graph) + + self.assertTrue(repr(b1s1)) + self.assertTrue(str(b1s1)) + + def test_topsort_raises_KeyError(self): + + class Step(bootsteps.Step): + requires = ('xyxxx.fsdasewe.Unknown', ) + + b = self.Blueprint([Step], app=self.app) + b.steps = b.claim_steps() + with self.assertRaises(ImportError): + b._finalize_steps(b.steps) + Step.requires = () + + b.steps = b.claim_steps() + b._finalize_steps(b.steps) + + with patch('celery.bootsteps.DependencyGraph') as Dep: + g = Dep.return_value = Mock() + g.topsort.side_effect = KeyError('foo') + with self.assertRaises(KeyError): + b._finalize_steps(b.steps) + + def test_apply(self): + + class MyBlueprint(bootsteps.Blueprint): + name = 'test_apply' + + def modules(self): + return ['A', 'B'] + + class B(bootsteps.Step): + name = 'test_apply.B' + + class C(bootsteps.Step): + name = 'test_apply.C' + requires = [B] + + class A(bootsteps.Step): + name = 'test_apply.A' + requires = [C] + + class D(bootsteps.Step): + 
name = 'test_apply.D' + last = True + + x = MyBlueprint([A, D], app=self.app) + x.apply(self) + + self.assertIsInstance(x.order[0], B) + self.assertIsInstance(x.order[1], C) + self.assertIsInstance(x.order[2], A) + self.assertIsInstance(x.order[3], D) + self.assertIn(A, x.types) + self.assertIs(x[A.name], x.order[2]) + + def test_find_last_but_no_steps(self): + + class MyBlueprint(bootsteps.Blueprint): + name = 'qwejwioqjewoqiej' + + x = MyBlueprint(app=self.app) + x.apply(self) + self.assertIsNone(x._find_last()) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_components.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_components.py new file mode 100644 index 0000000..b39865d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_components.py @@ -0,0 +1,38 @@ +from __future__ import absolute_import + +# some of these are tested in test_worker, so I've only written tests +# here to complete coverage. Should move everyting to this module at some +# point [-ask] + +from celery.worker.components import ( + Queues, + Pool, +) + +from celery.tests.case import AppCase, Mock + + +class test_Queues(AppCase): + + def test_create_when_eventloop(self): + w = Mock() + w.use_eventloop = w.pool_putlocks = w.pool_cls.uses_semaphore = True + q = Queues(w) + q.create(w) + self.assertIs(w.process_task, w._process_task_sem) + + +class test_Pool(AppCase): + + def test_close_terminate(self): + w = Mock() + comp = Pool(w) + pool = w.pool = Mock() + comp.close(w) + pool.close.assert_called_with() + comp.terminate(w) + pool.terminate.assert_called_with() + + w.pool = None + comp.close(w) + comp.terminate(w) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_consumer.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_consumer.py new file mode 100644 index 0000000..ea4f6bb --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_consumer.py @@ -0,0 +1,512 @@ +from __future__ import absolute_import + +import errno +import socket + +from billiard.exceptions import RestartFreqExceeded + +from celery.datastructures import LimitedSet +from celery.worker import state as worker_state +from celery.worker.consumer import ( + Consumer, + Heart, + Tasks, + Agent, + Mingle, + Gossip, + dump_body, + CLOSE, +) + +from celery.tests.case import AppCase, ContextMock, Mock, SkipTest, call, patch + + +class test_Consumer(AppCase): + + def get_consumer(self, no_hub=False, **kwargs): + consumer = Consumer( + on_task_request=Mock(), + init_callback=Mock(), + pool=Mock(), + app=self.app, + timer=Mock(), + controller=Mock(), + hub=None if no_hub else Mock(), + **kwargs + ) + consumer.blueprint = Mock() + consumer._restart_state = Mock() + consumer.connection = _amqp_connection() + consumer.connection_errors = (socket.error, OSError, ) + return consumer + + def test_taskbuckets_defaultdict(self): + c = self.get_consumer() + self.assertIsNone(c.task_buckets['fooxasdwx.wewe']) + + def test_dump_body_buffer(self): + msg = Mock() + msg.body = 'str' + try: + buf = buffer(msg.body) + except NameError: + raise SkipTest('buffer type not available') + self.assertTrue(dump_body(msg, buf)) + + def test_sets_heartbeat(self): + c = self.get_consumer(amqheartbeat=10) + self.assertEqual(c.amqheartbeat, 10) + self.app.conf.BROKER_HEARTBEAT = 20 + c = self.get_consumer(amqheartbeat=None) + self.assertEqual(c.amqheartbeat, 20) + + def test_gevent_bug_disables_connection_timeout(self): + with 
patch('celery.worker.consumer._detect_environment') as de: + de.return_value = 'gevent' + self.app.conf.BROKER_CONNECTION_TIMEOUT = 33.33 + self.get_consumer() + self.assertIsNone(self.app.conf.BROKER_CONNECTION_TIMEOUT) + + def test_limit_task(self): + c = self.get_consumer() + + with patch('celery.worker.consumer.task_reserved') as reserved: + bucket = Mock() + request = Mock() + bucket.can_consume.return_value = True + + c._limit_task(request, bucket, 3) + bucket.can_consume.assert_called_with(3) + reserved.assert_called_with(request) + c.on_task_request.assert_called_with(request) + + with patch('celery.worker.consumer.task_reserved') as reserved: + bucket.can_consume.return_value = False + bucket.expected_time.return_value = 3.33 + c._limit_task(request, bucket, 4) + bucket.can_consume.assert_called_with(4) + c.timer.call_after.assert_called_with( + 3.33, c._limit_task, (request, bucket, 4), + ) + bucket.expected_time.assert_called_with(4) + self.assertFalse(reserved.called) + + def test_start_blueprint_raises_EMFILE(self): + c = self.get_consumer() + exc = c.blueprint.start.side_effect = OSError() + exc.errno = errno.EMFILE + + with self.assertRaises(OSError): + c.start() + + def test_max_restarts_exceeded(self): + c = self.get_consumer() + + def se(*args, **kwargs): + c.blueprint.state = CLOSE + raise RestartFreqExceeded() + c._restart_state.step.side_effect = se + c.blueprint.start.side_effect = socket.error() + + with patch('celery.worker.consumer.sleep') as sleep: + c.start() + sleep.assert_called_with(1) + + def _closer(self, c): + def se(*args, **kwargs): + c.blueprint.state = CLOSE + return se + + def test_collects_at_restart(self): + c = self.get_consumer() + c.connection.collect.side_effect = MemoryError() + c.blueprint.start.side_effect = socket.error() + c.blueprint.restart.side_effect = self._closer(c) + c.start() + c.connection.collect.assert_called_with() + + def test_register_with_event_loop(self): + c = self.get_consumer() + c.register_with_event_loop(Mock(name='loop')) + + def test_on_close_clears_semaphore_timer_and_reqs(self): + with patch('celery.worker.consumer.reserved_requests') as reserved: + c = self.get_consumer() + c.on_close() + c.controller.semaphore.clear.assert_called_with() + c.timer.clear.assert_called_with() + reserved.clear.assert_called_with() + c.pool.flush.assert_called_with() + + c.controller = None + c.timer = None + c.pool = None + c.on_close() + + def test_connect_error_handler(self): + self.app.connection = _amqp_connection() + conn = self.app.connection.return_value + c = self.get_consumer() + self.assertTrue(c.connect()) + self.assertTrue(conn.ensure_connection.called) + errback = conn.ensure_connection.call_args[0][0] + conn.alt = [(1, 2, 3)] + errback(Mock(), 0) + + +class test_Heart(AppCase): + + def test_start(self): + c = Mock() + c.timer = Mock() + c.event_dispatcher = Mock() + + with patch('celery.worker.heartbeat.Heart') as hcls: + h = Heart(c) + self.assertTrue(h.enabled) + self.assertEqual(h.heartbeat_interval, None) + self.assertIsNone(c.heart) + + h.start(c) + self.assertTrue(c.heart) + hcls.assert_called_with(c.timer, c.event_dispatcher, + h.heartbeat_interval) + c.heart.start.assert_called_with() + + def test_start_heartbeat_interval(self): + c = Mock() + c.timer = Mock() + c.event_dispatcher = Mock() + + with patch('celery.worker.heartbeat.Heart') as hcls: + h = Heart(c, False, 20) + self.assertTrue(h.enabled) + self.assertEqual(h.heartbeat_interval, 20) + self.assertIsNone(c.heart) + + h.start(c) + self.assertTrue(c.heart) 
+ hcls.assert_called_with(c.timer, c.event_dispatcher, + h.heartbeat_interval) + c.heart.start.assert_called_with() + + +class test_Tasks(AppCase): + + def test_stop(self): + c = Mock() + tasks = Tasks(c) + self.assertIsNone(c.task_consumer) + self.assertIsNone(c.qos) + + c.task_consumer = Mock() + tasks.stop(c) + + def test_stop_already_stopped(self): + c = Mock() + tasks = Tasks(c) + tasks.stop(c) + + +class test_Agent(AppCase): + + def test_start(self): + c = Mock() + agent = Agent(c) + agent.instantiate = Mock() + agent.agent_cls = 'foo:Agent' + self.assertIsNotNone(agent.create(c)) + agent.instantiate.assert_called_with(agent.agent_cls, c.connection) + + +class test_Mingle(AppCase): + + def test_start_no_replies(self): + c = Mock() + c.app.connection = _amqp_connection() + mingle = Mingle(c) + I = c.app.control.inspect.return_value = Mock() + I.hello.return_value = {} + mingle.start(c) + + def test_start(self): + try: + c = Mock() + c.app.connection = _amqp_connection() + mingle = Mingle(c) + self.assertTrue(mingle.enabled) + + Aig = LimitedSet() + Big = LimitedSet() + Aig.add('Aig-1') + Aig.add('Aig-2') + Big.add('Big-1') + + I = c.app.control.inspect.return_value = Mock() + I.hello.return_value = { + 'A@example.com': { + 'clock': 312, + 'revoked': Aig._data, + }, + 'B@example.com': { + 'clock': 29, + 'revoked': Big._data, + }, + 'C@example.com': { + 'error': 'unknown method', + }, + } + + mingle.start(c) + I.hello.assert_called_with(c.hostname, worker_state.revoked._data) + c.app.clock.adjust.assert_has_calls([ + call(312), call(29), + ], any_order=True) + self.assertIn('Aig-1', worker_state.revoked) + self.assertIn('Aig-2', worker_state.revoked) + self.assertIn('Big-1', worker_state.revoked) + finally: + worker_state.revoked.clear() + + +def _amqp_connection(): + connection = ContextMock() + connection.return_value = ContextMock() + connection.return_value.transport.driver_type = 'amqp' + return connection + + +class test_Gossip(AppCase): + + def test_init(self): + c = self.Consumer() + c.app.connection = _amqp_connection() + g = Gossip(c) + self.assertTrue(g.enabled) + self.assertIs(c.gossip, g) + + def test_callbacks(self): + c = self.Consumer() + c.app.connection = _amqp_connection() + g = Gossip(c) + on_node_join = Mock(name='on_node_join') + on_node_join2 = Mock(name='on_node_join2') + on_node_leave = Mock(name='on_node_leave') + on_node_lost = Mock(name='on.node_lost') + g.on.node_join.add(on_node_join) + g.on.node_join.add(on_node_join2) + g.on.node_leave.add(on_node_leave) + g.on.node_lost.add(on_node_lost) + + worker = Mock(name='worker') + g.on_node_join(worker) + on_node_join.assert_called_with(worker) + on_node_join2.assert_called_with(worker) + g.on_node_leave(worker) + on_node_leave.assert_called_with(worker) + g.on_node_lost(worker) + on_node_lost.assert_called_with(worker) + + def test_election(self): + c = self.Consumer() + c.app.connection = _amqp_connection() + g = Gossip(c) + g.start(c) + g.election('id', 'topic', 'action') + self.assertListEqual(g.consensus_replies['id'], []) + g.dispatcher.send.assert_called_with( + 'worker-elect', id='id', topic='topic', cver=1, action='action', + ) + + def test_call_task(self): + c = self.Consumer() + c.app.connection = _amqp_connection() + g = Gossip(c) + g.start(c) + + with patch('celery.worker.consumer.signature') as signature: + sig = signature.return_value = Mock() + task = Mock() + g.call_task(task) + signature.assert_called_with(task, app=c.app) + sig.apply_async.assert_called_with() + + sig.apply_async.side_effect = 
MemoryError() + with patch('celery.worker.consumer.error') as error: + g.call_task(task) + self.assertTrue(error.called) + + def Event(self, id='id', clock=312, + hostname='foo@example.com', pid=4312, + topic='topic', action='action', cver=1): + return { + 'id': id, + 'clock': clock, + 'hostname': hostname, + 'pid': pid, + 'topic': topic, + 'action': action, + 'cver': cver, + } + + def test_on_elect(self): + c = self.Consumer() + c.app.connection = _amqp_connection() + g = Gossip(c) + g.start(c) + + event = self.Event('id1') + g.on_elect(event) + in_heap = g.consensus_requests['id1'] + self.assertTrue(in_heap) + g.dispatcher.send.assert_called_with('worker-elect-ack', id='id1') + + event.pop('clock') + with patch('celery.worker.consumer.error') as error: + g.on_elect(event) + self.assertTrue(error.called) + + def Consumer(self, hostname='foo@x.com', pid=4312): + c = Mock() + c.app.connection = _amqp_connection() + c.hostname = hostname + c.pid = pid + return c + + def setup_election(self, g, c): + g.start(c) + g.clock = self.app.clock + self.assertNotIn('idx', g.consensus_replies) + self.assertIsNone(g.on_elect_ack({'id': 'idx'})) + + g.state.alive_workers.return_value = [ + 'foo@x.com', 'bar@x.com', 'baz@x.com', + ] + g.consensus_replies['id1'] = [] + g.consensus_requests['id1'] = [] + e1 = self.Event('id1', 1, 'foo@x.com') + e2 = self.Event('id1', 2, 'bar@x.com') + e3 = self.Event('id1', 3, 'baz@x.com') + g.on_elect(e1) + g.on_elect(e2) + g.on_elect(e3) + self.assertEqual(len(g.consensus_requests['id1']), 3) + + with patch('celery.worker.consumer.info'): + g.on_elect_ack(e1) + self.assertEqual(len(g.consensus_replies['id1']), 1) + g.on_elect_ack(e2) + self.assertEqual(len(g.consensus_replies['id1']), 2) + g.on_elect_ack(e3) + with self.assertRaises(KeyError): + g.consensus_replies['id1'] + + def test_on_elect_ack_win(self): + c = self.Consumer(hostname='foo@x.com') # I will win + g = Gossip(c) + handler = g.election_handlers['topic'] = Mock() + self.setup_election(g, c) + handler.assert_called_with('action') + + def test_on_elect_ack_lose(self): + c = self.Consumer(hostname='bar@x.com') # I will lose + c.app.connection = _amqp_connection() + g = Gossip(c) + handler = g.election_handlers['topic'] = Mock() + self.setup_election(g, c) + self.assertFalse(handler.called) + + def test_on_elect_ack_win_but_no_action(self): + c = self.Consumer(hostname='foo@x.com') # I will win + g = Gossip(c) + g.election_handlers = {} + with patch('celery.worker.consumer.error') as error: + self.setup_election(g, c) + self.assertTrue(error.called) + + def test_on_node_join(self): + c = self.Consumer() + g = Gossip(c) + with patch('celery.worker.consumer.debug') as debug: + g.on_node_join(c) + debug.assert_called_with('%s joined the party', 'foo@x.com') + + def test_on_node_leave(self): + c = self.Consumer() + g = Gossip(c) + with patch('celery.worker.consumer.debug') as debug: + g.on_node_leave(c) + debug.assert_called_with('%s left', 'foo@x.com') + + def test_on_node_lost(self): + c = self.Consumer() + g = Gossip(c) + with patch('celery.worker.consumer.info') as info: + g.on_node_lost(c) + info.assert_called_with('missed heartbeat from %s', 'foo@x.com') + + def test_register_timer(self): + c = self.Consumer() + g = Gossip(c) + g.register_timer() + c.timer.call_repeatedly.assert_called_with(g.interval, g.periodic) + tref = g._tref + g.register_timer() + tref.cancel.assert_called_with() + + def test_periodic(self): + c = self.Consumer() + g = Gossip(c) + g.on_node_lost = Mock() + state = g.state = Mock() + 
worker = Mock() + state.workers = {'foo': worker} + worker.alive = True + worker.hostname = 'foo' + g.periodic() + + worker.alive = False + g.periodic() + g.on_node_lost.assert_called_with(worker) + with self.assertRaises(KeyError): + state.workers['foo'] + + def test_on_message(self): + c = self.Consumer() + g = Gossip(c) + self.assertTrue(g.enabled) + prepare = Mock() + prepare.return_value = 'worker-online', {} + c.app.events.State.assert_called_with( + on_node_join=g.on_node_join, + on_node_leave=g.on_node_leave, + max_tasks_in_memory=1, + ) + g.update_state = Mock() + worker = Mock() + g.on_node_join = Mock() + g.on_node_leave = Mock() + g.update_state.return_value = worker, 1 + message = Mock() + message.delivery_info = {'routing_key': 'worker-online'} + message.headers = {'hostname': 'other'} + + handler = g.event_handlers['worker-online'] = Mock() + g.on_message(prepare, message) + handler.assert_called_with(message.payload) + g.event_handlers = {} + + g.on_message(prepare, message) + + message.delivery_info = {'routing_key': 'worker-offline'} + prepare.return_value = 'worker-offline', {} + g.on_message(prepare, message) + + message.delivery_info = {'routing_key': 'worker-baz'} + prepare.return_value = 'worker-baz', {} + g.update_state.return_value = worker, 0 + g.on_message(prepare, message) + + message.headers = {'hostname': g.hostname} + g.on_message(prepare, message) + g.clock.forward.assert_called_with() diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_control.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_control.py new file mode 100644 index 0000000..86bf550 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_control.py @@ -0,0 +1,601 @@ +from __future__ import absolute_import + +import sys +import socket + +from collections import defaultdict +from datetime import datetime, timedelta + +from kombu import pidbox + +from celery.datastructures import AttributeDict +from celery.five import Queue as FastQueue +from celery.utils import uuid +from celery.utils.timer2 import Timer +from celery.worker import WorkController as _WC +from celery.worker import consumer +from celery.worker import control +from celery.worker import state as worker_state +from celery.worker.job import Request +from celery.worker.state import revoked +from celery.worker.control import Panel +from celery.worker.pidbox import Pidbox, gPidbox + +from celery.tests.case import AppCase, Mock, call, patch + +hostname = socket.gethostname() + + +class WorkController(object): + autoscaler = None + + def stats(self): + return {'total': worker_state.total_count} + + +class Consumer(consumer.Consumer): + + def __init__(self, app): + self.app = app + self.buffer = FastQueue() + self.handle_task = self.buffer.put + self.timer = Timer() + self.event_dispatcher = Mock() + self.controller = WorkController() + self.task_consumer = Mock() + self.prefetch_multiplier = 1 + self.initial_prefetch_count = 1 + + from celery.concurrency.base import BasePool + self.pool = BasePool(10) + self.task_buckets = defaultdict(lambda: None) + + +class test_Pidbox(AppCase): + + def test_shutdown(self): + with patch('celery.worker.pidbox.ignore_errors') as eig: + parent = Mock() + pbox = Pidbox(parent) + pbox._close_channel = Mock() + self.assertIs(pbox.c, parent) + pconsumer = pbox.consumer = Mock() + cancel = pconsumer.cancel + pbox.shutdown(parent) + eig.assert_called_with(parent, cancel) + pbox._close_channel.assert_called_with(parent) + + +class 
test_Pidbox_green(AppCase): + + def test_stop(self): + parent = Mock() + g = gPidbox(parent) + stopped = g._node_stopped = Mock() + shutdown = g._node_shutdown = Mock() + close_chan = g._close_channel = Mock() + + g.stop(parent) + shutdown.set.assert_called_with() + stopped.wait.assert_called_with() + close_chan.assert_called_with(parent) + self.assertIsNone(g._node_stopped) + self.assertIsNone(g._node_shutdown) + + close_chan.reset() + g.stop(parent) + close_chan.assert_called_with(parent) + + def test_resets(self): + parent = Mock() + g = gPidbox(parent) + g._resets = 100 + g.reset() + self.assertEqual(g._resets, 101) + + def test_loop(self): + parent = Mock() + conn = parent.connect.return_value = self.app.connection() + drain = conn.drain_events = Mock() + g = gPidbox(parent) + parent.connection = Mock() + do_reset = g._do_reset = Mock() + + call_count = [0] + + def se(*args, **kwargs): + if call_count[0] > 2: + g._node_shutdown.set() + g.reset() + call_count[0] += 1 + drain.side_effect = se + g.loop(parent) + + self.assertEqual(do_reset.call_count, 4) + + +class test_ControlPanel(AppCase): + + def setup(self): + self.panel = self.create_panel(consumer=Consumer(self.app)) + + @self.app.task(name='c.unittest.mytask', rate_limit=200, shared=False) + def mytask(): + pass + self.mytask = mytask + + def create_state(self, **kwargs): + kwargs.setdefault('app', self.app) + kwargs.setdefault('hostname', hostname) + return AttributeDict(kwargs) + + def create_panel(self, **kwargs): + return self.app.control.mailbox.Node(hostname=hostname, + state=self.create_state(**kwargs), + handlers=Panel.data) + + def test_enable_events(self): + consumer = Consumer(self.app) + panel = self.create_panel(consumer=consumer) + evd = consumer.event_dispatcher + evd.groups = set() + panel.handle('enable_events') + self.assertFalse(evd.groups) + evd.groups = set(['worker']) + panel.handle('enable_events') + self.assertIn('task', evd.groups) + evd.groups = set(['task']) + self.assertIn('already enabled', panel.handle('enable_events')['ok']) + + def test_disable_events(self): + consumer = Consumer(self.app) + panel = self.create_panel(consumer=consumer) + evd = consumer.event_dispatcher + evd.enabled = True + evd.groups = set(['task']) + panel.handle('disable_events') + self.assertNotIn('task', evd.groups) + self.assertIn('already disabled', panel.handle('disable_events')['ok']) + + def test_clock(self): + consumer = Consumer(self.app) + panel = self.create_panel(consumer=consumer) + panel.state.app.clock.value = 313 + x = panel.handle('clock') + self.assertEqual(x['clock'], 313) + + def test_hello(self): + consumer = Consumer(self.app) + panel = self.create_panel(consumer=consumer) + panel.state.app.clock.value = 313 + worker_state.revoked.add('revoked1') + try: + x = panel.handle('hello', {'from_node': 'george@vandelay.com'}) + self.assertIn('revoked1', x['revoked']) + self.assertEqual(x['clock'], 314) # incremented + finally: + worker_state.revoked.discard('revoked1') + + def test_conf(self): + return + consumer = Consumer(self.app) + panel = self.create_panel(consumer=consumer) + self.app.conf.SOME_KEY6 = 'hello world' + x = panel.handle('dump_conf') + self.assertIn('SOME_KEY6', x) + + def test_election(self): + consumer = Consumer(self.app) + panel = self.create_panel(consumer=consumer) + consumer.gossip = Mock() + panel.handle( + 'election', {'id': 'id', 'topic': 'topic', 'action': 'action'}, + ) + consumer.gossip.election.assert_called_with('id', 'topic', 'action') + + def test_heartbeat(self): + consumer = 
Consumer(self.app) + panel = self.create_panel(consumer=consumer) + consumer.event_dispatcher.enabled = True + panel.handle('heartbeat') + self.assertIn(('worker-heartbeat', ), + consumer.event_dispatcher.send.call_args) + + def test_time_limit(self): + panel = self.create_panel(consumer=Mock()) + r = panel.handle('time_limit', arguments=dict( + task_name=self.mytask.name, hard=30, soft=10)) + self.assertEqual( + (self.mytask.time_limit, self.mytask.soft_time_limit), + (30, 10), + ) + self.assertIn('ok', r) + r = panel.handle('time_limit', arguments=dict( + task_name=self.mytask.name, hard=None, soft=None)) + self.assertEqual( + (self.mytask.time_limit, self.mytask.soft_time_limit), + (None, None), + ) + self.assertIn('ok', r) + + r = panel.handle('time_limit', arguments=dict( + task_name='248e8afya9s8dh921eh928', hard=30)) + self.assertIn('error', r) + + def test_active_queues(self): + import kombu + + x = kombu.Consumer(self.app.connection(), + [kombu.Queue('foo', kombu.Exchange('foo'), 'foo'), + kombu.Queue('bar', kombu.Exchange('bar'), 'bar')], + auto_declare=False) + consumer = Mock() + consumer.task_consumer = x + panel = self.create_panel(consumer=consumer) + r = panel.handle('active_queues') + self.assertListEqual(list(sorted(q['name'] for q in r)), + ['bar', 'foo']) + + def test_dump_tasks(self): + info = '\n'.join(self.panel.handle('dump_tasks')) + self.assertIn('mytask', info) + self.assertIn('rate_limit=200', info) + + def test_stats(self): + prev_count, worker_state.total_count = worker_state.total_count, 100 + try: + self.assertDictContainsSubset({'total': 100}, + self.panel.handle('stats')) + finally: + worker_state.total_count = prev_count + + def test_report(self): + self.panel.handle('report') + + def test_active(self): + r = Request({ + 'task': self.mytask.name, + 'id': 'do re mi', + 'args': (), + 'kwargs': {}, + }, app=self.app) + worker_state.active_requests.add(r) + try: + self.assertTrue(self.panel.handle('dump_active')) + finally: + worker_state.active_requests.discard(r) + + def test_pool_grow(self): + + class MockPool(object): + + def __init__(self, size=1): + self.size = size + + def grow(self, n=1): + self.size += n + + def shrink(self, n=1): + self.size -= n + + @property + def num_processes(self): + return self.size + + consumer = Consumer(self.app) + consumer.prefetch_multiplier = 8 + consumer.qos = Mock(name='qos') + consumer.pool = MockPool(1) + panel = self.create_panel(consumer=consumer) + + panel.handle('pool_grow') + self.assertEqual(consumer.pool.size, 2) + consumer.qos.increment_eventually.assert_called_with(8) + self.assertEqual(consumer.initial_prefetch_count, 16) + panel.handle('pool_shrink') + self.assertEqual(consumer.pool.size, 1) + consumer.qos.decrement_eventually.assert_called_with(8) + self.assertEqual(consumer.initial_prefetch_count, 8) + + panel.state.consumer = Mock() + panel.state.consumer.controller = Mock() + sc = panel.state.consumer.controller.autoscaler = Mock() + panel.handle('pool_grow') + self.assertTrue(sc.force_scale_up.called) + panel.handle('pool_shrink') + self.assertTrue(sc.force_scale_down.called) + + def test_add__cancel_consumer(self): + + class MockConsumer(object): + queues = [] + canceled = [] + consuming = False + + def add_queue(self, queue): + self.queues.append(queue.name) + + def consume(self): + self.consuming = True + + def cancel_by_queue(self, queue): + self.canceled.append(queue) + + def consuming_from(self, queue): + return queue in self.queues + + consumer = Consumer(self.app) + consumer.task_consumer = 
MockConsumer() + panel = self.create_panel(consumer=consumer) + + panel.handle('add_consumer', {'queue': 'MyQueue'}) + self.assertIn('MyQueue', consumer.task_consumer.queues) + self.assertTrue(consumer.task_consumer.consuming) + panel.handle('add_consumer', {'queue': 'MyQueue'}) + panel.handle('cancel_consumer', {'queue': 'MyQueue'}) + self.assertIn('MyQueue', consumer.task_consumer.canceled) + + def test_revoked(self): + worker_state.revoked.clear() + worker_state.revoked.add('a1') + worker_state.revoked.add('a2') + + try: + self.assertEqual(sorted(self.panel.handle('dump_revoked')), + ['a1', 'a2']) + finally: + worker_state.revoked.clear() + + def test_dump_schedule(self): + consumer = Consumer(self.app) + panel = self.create_panel(consumer=consumer) + self.assertFalse(panel.handle('dump_schedule')) + r = Request({ + 'task': self.mytask.name, + 'id': 'CAFEBABE', + 'args': (), + 'kwargs': {}, + }, app=self.app) + consumer.timer.schedule.enter_at( + consumer.timer.Entry(lambda x: x, (r, )), + datetime.now() + timedelta(seconds=10)) + consumer.timer.schedule.enter_at( + consumer.timer.Entry(lambda x: x, (object(), )), + datetime.now() + timedelta(seconds=10)) + self.assertTrue(panel.handle('dump_schedule')) + + def test_dump_reserved(self): + consumer = Consumer(self.app) + worker_state.reserved_requests.add(Request({ + 'task': self.mytask.name, + 'id': uuid(), + 'args': (2, 2), + 'kwargs': {}, + }, app=self.app)) + try: + panel = self.create_panel(consumer=consumer) + response = panel.handle('dump_reserved', {'safe': True}) + self.assertDictContainsSubset( + {'name': self.mytask.name, + 'args': (2, 2), + 'kwargs': {}, + 'hostname': socket.gethostname()}, + response[0], + ) + worker_state.reserved_requests.clear() + self.assertFalse(panel.handle('dump_reserved')) + finally: + worker_state.reserved_requests.clear() + + def test_rate_limit_invalid_rate_limit_string(self): + e = self.panel.handle('rate_limit', arguments=dict( + task_name='tasks.add', rate_limit='x1240301#%!')) + self.assertIn('Invalid rate limit string', e.get('error')) + + def test_rate_limit(self): + + class xConsumer(object): + reset = False + + def reset_rate_limits(self): + self.reset = True + + consumer = xConsumer() + panel = self.create_panel(app=self.app, consumer=consumer) + + task = self.app.tasks[self.mytask.name] + panel.handle('rate_limit', arguments=dict(task_name=task.name, + rate_limit='100/m')) + self.assertEqual(task.rate_limit, '100/m') + self.assertTrue(consumer.reset) + consumer.reset = False + panel.handle('rate_limit', arguments=dict(task_name=task.name, + rate_limit=0)) + self.assertEqual(task.rate_limit, 0) + self.assertTrue(consumer.reset) + + def test_rate_limit_nonexistant_task(self): + self.panel.handle('rate_limit', arguments={ + 'task_name': 'xxxx.does.not.exist', + 'rate_limit': '1000/s'}) + + def test_unexposed_command(self): + with self.assertRaises(KeyError): + self.panel.handle('foo', arguments={}) + + def test_revoke_with_name(self): + tid = uuid() + m = {'method': 'revoke', + 'destination': hostname, + 'arguments': {'task_id': tid, + 'task_name': self.mytask.name}} + self.panel.handle_message(m, None) + self.assertIn(tid, revoked) + + def test_revoke_with_name_not_in_registry(self): + tid = uuid() + m = {'method': 'revoke', + 'destination': hostname, + 'arguments': {'task_id': tid, + 'task_name': 'xxxxxxxxx33333333388888'}} + self.panel.handle_message(m, None) + self.assertIn(tid, revoked) + + def test_revoke(self): + tid = uuid() + m = {'method': 'revoke', + 'destination': hostname, + 
'arguments': {'task_id': tid}} + self.panel.handle_message(m, None) + self.assertIn(tid, revoked) + + m = {'method': 'revoke', + 'destination': 'does.not.exist', + 'arguments': {'task_id': tid + 'xxx'}} + self.panel.handle_message(m, None) + self.assertNotIn(tid + 'xxx', revoked) + + def test_revoke_terminate(self): + request = Mock() + request.id = tid = uuid() + worker_state.reserved_requests.add(request) + try: + r = control.revoke(Mock(), tid, terminate=True) + self.assertIn(tid, revoked) + self.assertTrue(request.terminate.call_count) + self.assertIn('terminate:', r['ok']) + # unknown task id only revokes + r = control.revoke(Mock(), uuid(), terminate=True) + self.assertIn('tasks unknown', r['ok']) + finally: + worker_state.reserved_requests.discard(request) + + def test_autoscale(self): + self.panel.state.consumer = Mock() + self.panel.state.consumer.controller = Mock() + sc = self.panel.state.consumer.controller.autoscaler = Mock() + sc.update.return_value = 10, 2 + m = {'method': 'autoscale', + 'destination': hostname, + 'arguments': {'max': '10', 'min': '2'}} + r = self.panel.handle_message(m, None) + self.assertIn('ok', r) + + self.panel.state.consumer.controller.autoscaler = None + r = self.panel.handle_message(m, None) + self.assertIn('error', r) + + def test_ping(self): + m = {'method': 'ping', + 'destination': hostname} + r = self.panel.handle_message(m, None) + self.assertEqual(r, {'ok': 'pong'}) + + def test_shutdown(self): + m = {'method': 'shutdown', + 'destination': hostname} + with self.assertRaises(SystemExit): + self.panel.handle_message(m, None) + + def test_panel_reply(self): + + replies = [] + + class _Node(pidbox.Node): + + def reply(self, data, exchange, routing_key, **kwargs): + replies.append(data) + + panel = _Node(hostname=hostname, + state=self.create_state(consumer=Consumer(self.app)), + handlers=Panel.data, + mailbox=self.app.control.mailbox) + r = panel.dispatch('ping', reply_to={'exchange': 'x', + 'routing_key': 'x'}) + self.assertEqual(r, {'ok': 'pong'}) + self.assertDictEqual(replies[0], {panel.hostname: {'ok': 'pong'}}) + + def test_pool_restart(self): + consumer = Consumer(self.app) + consumer.controller = _WC(app=self.app) + consumer.controller.consumer = consumer + consumer.controller.pool.restart = Mock() + consumer.reset_rate_limits = Mock(name='reset_rate_limits()') + consumer.update_strategies = Mock(name='update_strategies()') + consumer.event_dispatcher = Mock(name='evd') + panel = self.create_panel(consumer=consumer) + assert panel.state.consumer.controller.consumer is consumer + panel.app = self.app + _import = panel.app.loader.import_from_cwd = Mock() + _reload = Mock() + + with self.assertRaises(ValueError): + panel.handle('pool_restart', {'reloader': _reload}) + + self.app.conf.CELERYD_POOL_RESTARTS = True + panel.handle('pool_restart', {'reloader': _reload}) + self.assertTrue(consumer.controller.pool.restart.called) + consumer.reset_rate_limits.assert_called_with() + consumer.update_strategies.assert_called_with() + self.assertFalse(_reload.called) + self.assertFalse(_import.called) + + def test_pool_restart_import_modules(self): + consumer = Consumer(self.app) + consumer.controller = _WC(app=self.app) + consumer.controller.consumer = consumer + consumer.controller.pool.restart = Mock() + consumer.reset_rate_limits = Mock(name='reset_rate_limits()') + consumer.update_strategies = Mock(name='update_strategies()') + panel = self.create_panel(consumer=consumer) + panel.app = self.app + assert panel.state.consumer.controller.consumer is 
consumer + _import = consumer.controller.app.loader.import_from_cwd = Mock() + _reload = Mock() + + self.app.conf.CELERYD_POOL_RESTARTS = True + panel.handle('pool_restart', {'modules': ['foo', 'bar'], + 'reloader': _reload}) + + self.assertTrue(consumer.controller.pool.restart.called) + consumer.reset_rate_limits.assert_called_with() + consumer.update_strategies.assert_called_with() + self.assertFalse(_reload.called) + self.assertItemsEqual( + [call('bar'), call('foo')], + _import.call_args_list, + ) + + def test_pool_restart_reload_modules(self): + consumer = Consumer(self.app) + consumer.controller = _WC(app=self.app) + consumer.controller.consumer = consumer + consumer.controller.pool.restart = Mock() + consumer.reset_rate_limits = Mock(name='reset_rate_limits()') + consumer.update_strategies = Mock(name='update_strategies()') + panel = self.create_panel(consumer=consumer) + panel.app = self.app + _import = panel.app.loader.import_from_cwd = Mock() + _reload = Mock() + + self.app.conf.CELERYD_POOL_RESTARTS = True + with patch.dict(sys.modules, {'foo': None}): + panel.handle('pool_restart', {'modules': ['foo'], + 'reload': False, + 'reloader': _reload}) + + self.assertTrue(consumer.controller.pool.restart.called) + self.assertFalse(_reload.called) + self.assertFalse(_import.called) + + _import.reset_mock() + _reload.reset_mock() + consumer.controller.pool.restart.reset_mock() + + panel.handle('pool_restart', {'modules': ['foo'], + 'reload': True, + 'reloader': _reload}) + + self.assertTrue(consumer.controller.pool.restart.called) + self.assertTrue(_reload.called) + self.assertFalse(_import.called) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_heartbeat.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_heartbeat.py new file mode 100644 index 0000000..50559ca --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_heartbeat.py @@ -0,0 +1,73 @@ +from __future__ import absolute_import + +from celery.worker.heartbeat import Heart +from celery.tests.case import AppCase + + +class MockDispatcher(object): + heart = None + next_iter = 0 + + def __init__(self): + self.sent = [] + self.on_enabled = set() + self.on_disabled = set() + self.enabled = True + + def send(self, msg, **_fields): + self.sent.append(msg) + if self.heart: + if self.next_iter > 10: + self.heart._shutdown.set() + self.next_iter += 1 + + +class MockDispatcherRaising(object): + + def send(self, msg): + if msg == 'worker-offline': + raise Exception('foo') + + +class MockTimer(object): + + def call_repeatedly(self, secs, fun, args=(), kwargs={}): + + class entry(tuple): + canceled = False + + def cancel(self): + self.canceled = True + + return entry((secs, fun, args, kwargs)) + + def cancel(self, entry): + entry.cancel() + + +class test_Heart(AppCase): + + def test_start_stop(self): + timer = MockTimer() + eventer = MockDispatcher() + h = Heart(timer, eventer, interval=1) + h.start() + self.assertTrue(h.tref) + h.stop() + self.assertIsNone(h.tref) + h.stop() + + def test_start_when_disabled(self): + timer = MockTimer() + eventer = MockDispatcher() + eventer.enabled = False + h = Heart(timer, eventer) + h.start() + self.assertFalse(h.tref) + + def test_stop_when_disabled(self): + timer = MockTimer() + eventer = MockDispatcher() + eventer.enabled = False + h = Heart(timer, eventer) + h.stop() diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_hub.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_hub.py new file mode 
100644 index 0000000..e84abf3 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_hub.py @@ -0,0 +1,342 @@ +from __future__ import absolute_import + +from kombu.async import Hub, READ, WRITE, ERR +from kombu.async.debug import callback_for, repr_flag, _rcb +from kombu.async.semaphore import DummyLock, LaxBoundedSemaphore + +from celery.five import range +from celery.tests.case import Case, Mock, call, patch + + +class File(object): + + def __init__(self, fd): + self.fd = fd + + def fileno(self): + return self.fd + + def __eq__(self, other): + if isinstance(other, File): + return self.fd == other.fd + return NotImplemented + + def __hash__(self): + return hash(self.fd) + + +class test_DummyLock(Case): + + def test_context(self): + mutex = DummyLock() + with mutex: + pass + + +class test_LaxBoundedSemaphore(Case): + + def test_acquire_release(self): + x = LaxBoundedSemaphore(2) + + c1 = Mock() + x.acquire(c1, 1) + self.assertEqual(x.value, 1) + c1.assert_called_with(1) + + c2 = Mock() + x.acquire(c2, 2) + self.assertEqual(x.value, 0) + c2.assert_called_with(2) + + c3 = Mock() + x.acquire(c3, 3) + self.assertEqual(x.value, 0) + self.assertFalse(c3.called) + + x.release() + self.assertEqual(x.value, 0) + x.release() + self.assertEqual(x.value, 1) + x.release() + self.assertEqual(x.value, 2) + c3.assert_called_with(3) + + def test_bounded(self): + x = LaxBoundedSemaphore(2) + for i in range(100): + x.release() + self.assertEqual(x.value, 2) + + def test_grow_shrink(self): + x = LaxBoundedSemaphore(1) + self.assertEqual(x.initial_value, 1) + cb1 = Mock() + x.acquire(cb1, 1) + cb1.assert_called_with(1) + self.assertEqual(x.value, 0) + + cb2 = Mock() + x.acquire(cb2, 2) + self.assertFalse(cb2.called) + self.assertEqual(x.value, 0) + + cb3 = Mock() + x.acquire(cb3, 3) + self.assertFalse(cb3.called) + + x.grow(2) + cb2.assert_called_with(2) + cb3.assert_called_with(3) + self.assertEqual(x.value, 2) + self.assertEqual(x.initial_value, 3) + + self.assertFalse(x._waiting) + x.grow(3) + for i in range(x.initial_value): + self.assertTrue(x.acquire(Mock())) + self.assertFalse(x.acquire(Mock())) + x.clear() + + x.shrink(3) + for i in range(x.initial_value): + self.assertTrue(x.acquire(Mock())) + self.assertFalse(x.acquire(Mock())) + self.assertEqual(x.value, 0) + + for i in range(100): + x.release() + self.assertEqual(x.value, x.initial_value) + + def test_clear(self): + x = LaxBoundedSemaphore(10) + for i in range(11): + x.acquire(Mock()) + self.assertTrue(x._waiting) + self.assertEqual(x.value, 0) + + x.clear() + self.assertFalse(x._waiting) + self.assertEqual(x.value, x.initial_value) + + +class test_Hub(Case): + + def test_repr_flag(self): + self.assertEqual(repr_flag(READ), 'R') + self.assertEqual(repr_flag(WRITE), 'W') + self.assertEqual(repr_flag(ERR), '!') + self.assertEqual(repr_flag(READ | WRITE), 'RW') + self.assertEqual(repr_flag(READ | ERR), 'R!') + self.assertEqual(repr_flag(WRITE | ERR), 'W!') + self.assertEqual(repr_flag(READ | WRITE | ERR), 'RW!') + + def test_repr_callback_rcb(self): + + def f(): + pass + + self.assertEqual(_rcb(f), f.__name__) + self.assertEqual(_rcb('foo'), 'foo') + + @patch('kombu.async.hub.poll') + def test_start_stop(self, poll): + hub = Hub() + poll.assert_called_with() + + poller = hub.poller + hub.stop() + hub.close() + poller.close.assert_called_with() + + def test_fire_timers(self): + hub = Hub() + hub.timer = Mock() + hub.timer._queue = [] + self.assertEqual(hub.fire_timers(min_delay=42.324, + max_delay=32.321), 32.321) + + 
hub.timer._queue = [1] + hub.scheduler = iter([(3.743, None)]) + self.assertEqual(hub.fire_timers(), 3.743) + + e1, e2, e3 = Mock(), Mock(), Mock() + entries = [e1, e2, e3] + + def reset(): + return [m.reset() for m in [e1, e2, e3]] + + def se(): + while 1: + while entries: + yield None, entries.pop() + yield 3.982, None + hub.scheduler = se() + + self.assertEqual(hub.fire_timers(max_timers=10), 3.982) + for E in [e3, e2, e1]: + E.assert_called_with() + reset() + + entries[:] = [Mock() for _ in range(11)] + keep = list(entries) + self.assertEqual(hub.fire_timers(max_timers=10, min_delay=1.13), 1.13) + for E in reversed(keep[1:]): + E.assert_called_with() + reset() + self.assertEqual(hub.fire_timers(max_timers=10), 3.982) + keep[0].assert_called_with() + + def test_fire_timers_raises(self): + hub = Hub() + eback = Mock() + eback.side_effect = KeyError('foo') + hub.timer = Mock() + hub.scheduler = iter([(0, eback)]) + with self.assertRaises(KeyError): + hub.fire_timers(propagate=(KeyError, )) + + eback.side_effect = ValueError('foo') + hub.scheduler = iter([(0, eback)]) + with patch('kombu.async.hub.logger') as logger: + with self.assertRaises(StopIteration): + hub.fire_timers() + self.assertTrue(logger.error.called) + + def test_add_raises_ValueError(self): + hub = Hub() + hub.poller = Mock(name='hub.poller') + hub.poller.register.side_effect = ValueError() + hub._discard = Mock(name='hub.discard') + with self.assertRaises(ValueError): + hub.add(2, Mock(), READ) + hub._discard.assert_called_with(2) + + def test_repr_active(self): + hub = Hub() + hub.readers = {1: Mock(), 2: Mock()} + hub.writers = {3: Mock(), 4: Mock()} + for value in list(hub.readers.values()) + list(hub.writers.values()): + value.__name__ = 'mock' + self.assertTrue(hub.repr_active()) + + def test_repr_events(self): + hub = Hub() + hub.readers = {6: Mock(), 7: Mock(), 8: Mock()} + hub.writers = {9: Mock()} + for value in list(hub.readers.values()) + list(hub.writers.values()): + value.__name__ = 'mock' + self.assertTrue(hub.repr_events([ + (6, READ), + (7, ERR), + (8, READ | ERR), + (9, WRITE), + (10, 13213), + ])) + + def test_callback_for(self): + hub = Hub() + reader, writer = Mock(), Mock() + hub.readers = {6: reader} + hub.writers = {7: writer} + + self.assertEqual(callback_for(hub, 6, READ), reader) + self.assertEqual(callback_for(hub, 7, WRITE), writer) + with self.assertRaises(KeyError): + callback_for(hub, 6, WRITE) + self.assertEqual(callback_for(hub, 6, WRITE, 'foo'), 'foo') + + def test_add_remove_readers(self): + hub = Hub() + P = hub.poller = Mock() + + read_A = Mock() + read_B = Mock() + hub.add_reader(10, read_A, 10) + hub.add_reader(File(11), read_B, 11) + + P.register.assert_has_calls([ + call(10, hub.READ | hub.ERR), + call(11, hub.READ | hub.ERR), + ], any_order=True) + + self.assertEqual(hub.readers[10], (read_A, (10, ))) + self.assertEqual(hub.readers[11], (read_B, (11, ))) + + hub.remove(10) + self.assertNotIn(10, hub.readers) + hub.remove(File(11)) + self.assertNotIn(11, hub.readers) + P.unregister.assert_has_calls([ + call(10), call(11), + ]) + + def test_can_remove_unknown_fds(self): + hub = Hub() + hub.poller = Mock() + hub.remove(30) + hub.remove(File(301)) + + def test_remove__unregister_raises(self): + hub = Hub() + hub.poller = Mock() + hub.poller.unregister.side_effect = OSError() + + hub.remove(313) + + def test_add_writers(self): + hub = Hub() + P = hub.poller = Mock() + + write_A = Mock() + write_B = Mock() + hub.add_writer(20, write_A) + hub.add_writer(File(21), write_B) + + 
P.register.assert_has_calls([ + call(20, hub.WRITE), + call(21, hub.WRITE), + ], any_order=True) + + self.assertEqual(hub.writers[20], (write_A, ())) + self.assertEqual(hub.writers[21], (write_B, ())) + + hub.remove(20) + self.assertNotIn(20, hub.writers) + hub.remove(File(21)) + self.assertNotIn(21, hub.writers) + P.unregister.assert_has_calls([ + call(20), call(21), + ]) + + def test_enter__exit(self): + hub = Hub() + P = hub.poller = Mock() + on_close = Mock() + hub.on_close.add(on_close) + + try: + read_A = Mock() + read_B = Mock() + hub.add_reader(10, read_A) + hub.add_reader(File(11), read_B) + write_A = Mock() + write_B = Mock() + hub.add_writer(20, write_A) + hub.add_writer(File(21), write_B) + self.assertTrue(hub.readers) + self.assertTrue(hub.writers) + finally: + assert hub.poller + hub.close() + self.assertFalse(hub.readers) + self.assertFalse(hub.writers) + + P.unregister.assert_has_calls([ + call(10), call(11), call(20), call(21), + ], any_order=True) + + on_close.assert_called_with(hub) + + def test_scheduler_property(self): + hub = Hub(timer=[1, 2, 3]) + self.assertEqual(list(hub.scheduler), [1, 2, 3]) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_loops.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_loops.py new file mode 100644 index 0000000..be8d3a1 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_loops.py @@ -0,0 +1,425 @@ +from __future__ import absolute_import + +import socket + +from kombu.async import Hub, READ, WRITE, ERR + +from celery.bootsteps import CLOSE, RUN +from celery.exceptions import InvalidTaskError, WorkerShutdown, WorkerTerminate +from celery.five import Empty +from celery.worker import state +from celery.worker.consumer import Consumer +from celery.worker.loops import asynloop, synloop + +from celery.tests.case import AppCase, Mock, body_from_sig + + +class X(object): + + def __init__(self, app, heartbeat=None, on_task_message=None, + transport_driver_type=None): + hub = Hub() + ( + self.obj, + self.connection, + self.consumer, + self.blueprint, + self.hub, + self.qos, + self.heartbeat, + self.clock, + ) = self.args = [Mock(name='obj'), + Mock(name='connection'), + Mock(name='consumer'), + Mock(name='blueprint'), + hub, + Mock(name='qos'), + heartbeat, + Mock(name='clock')] + self.connection.supports_heartbeats = True + self.connection.get_heartbeat_interval.side_effect = ( + lambda: self.heartbeat + ) + self.consumer.callbacks = [] + self.obj.strategies = {} + self.connection.connection_errors = (socket.error, ) + if transport_driver_type: + self.connection.transport.driver_type = transport_driver_type + self.hub.readers = {} + self.hub.writers = {} + self.hub.consolidate = set() + self.hub.timer = Mock(name='hub.timer') + self.hub.timer._queue = [Mock()] + self.hub.fire_timers = Mock(name='hub.fire_timers') + self.hub.fire_timers.return_value = 1.7 + self.hub.poller = Mock(name='hub.poller') + self.hub.close = Mock(name='hub.close()') # asynloop calls hub.close + self.Hub = self.hub + self.blueprint.state = RUN + # need this for create_task_handler + _consumer = Consumer(Mock(), timer=Mock(), app=app) + _consumer.on_task_message = on_task_message or [] + self.obj.create_task_handler = _consumer.create_task_handler + self.on_unknown_message = self.obj.on_unknown_message = Mock( + name='on_unknown_message', + ) + _consumer.on_unknown_message = self.on_unknown_message + self.on_unknown_task = self.obj.on_unknown_task = Mock( + name='on_unknown_task', + ) + 
_consumer.on_unknown_task = self.on_unknown_task + self.on_invalid_task = self.obj.on_invalid_task = Mock( + name='on_invalid_task', + ) + _consumer.on_invalid_task = self.on_invalid_task + _consumer.strategies = self.obj.strategies + + def timeout_then_error(self, mock): + + def first(*args, **kwargs): + mock.side_effect = socket.error() + self.connection.more_to_read = False + raise socket.timeout() + mock.side_effect = first + + def close_then_error(self, mock=None, mod=0, exc=None): + mock = Mock() if mock is None else mock + + def first(*args, **kwargs): + if not mod or mock.call_count > mod: + self.close() + self.connection.more_to_read = False + raise (socket.error() if exc is None else exc) + mock.side_effect = first + return mock + + def close(self, *args, **kwargs): + self.blueprint.state = CLOSE + + def closer(self, mock=None, mod=0): + mock = Mock() if mock is None else mock + + def closing(*args, **kwargs): + if not mod or mock.call_count >= mod: + self.close() + mock.side_effect = closing + return mock + + +def get_task_callback(*args, **kwargs): + x = X(*args, **kwargs) + x.blueprint.state = CLOSE + asynloop(*x.args) + return x, x.consumer.callbacks[0] + + +class test_asynloop(AppCase): + + def setup(self): + + @self.app.task(shared=False) + def add(x, y): + return x + y + self.add = add + + def test_drain_after_consume(self): + x, _ = get_task_callback(self.app, transport_driver_type='amqp') + self.assertIn( + x.connection.drain_events, [p.fun for p in x.hub._ready], + ) + + def test_setup_heartbeat(self): + x = X(self.app, heartbeat=10) + x.hub.call_repeatedly = Mock(name='x.hub.call_repeatedly()') + x.blueprint.state = CLOSE + asynloop(*x.args) + x.consumer.consume.assert_called_with() + x.obj.on_ready.assert_called_with() + x.hub.call_repeatedly.assert_called_with( + 10 / 2.0, x.connection.heartbeat_check, 2.0, + ) + + def task_context(self, sig, **kwargs): + x, on_task = get_task_callback(self.app, **kwargs) + body = body_from_sig(self.app, sig) + message = Mock() + strategy = x.obj.strategies[sig.task] = Mock() + return x, on_task, body, message, strategy + + def test_on_task_received(self): + _, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2)) + on_task(body, msg) + strategy.assert_called_with( + msg, body, msg.ack_log_error, msg.reject_log_error, [], + ) + + def test_on_task_received_executes_on_task_message(self): + cbs = [Mock(), Mock(), Mock()] + _, on_task, body, msg, strategy = self.task_context( + self.add.s(2, 2), on_task_message=cbs, + ) + on_task(body, msg) + strategy.assert_called_with( + msg, body, msg.ack_log_error, msg.reject_log_error, cbs, + ) + + def test_on_task_message_missing_name(self): + x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2)) + body.pop('task') + on_task(body, msg) + x.on_unknown_message.assert_called_with(body, msg) + + def test_on_task_not_registered(self): + x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2)) + exc = strategy.side_effect = KeyError(self.add.name) + on_task(body, msg) + x.on_unknown_task.assert_called_with(body, msg, exc) + + def test_on_task_InvalidTaskError(self): + x, on_task, body, msg, strategy = self.task_context(self.add.s(2, 2)) + exc = strategy.side_effect = InvalidTaskError() + on_task(body, msg) + x.on_invalid_task.assert_called_with(body, msg, exc) + + def test_should_terminate(self): + x = X(self.app) + # XXX why aren't the errors propagated?!? 
+ state.should_terminate = True + try: + with self.assertRaises(WorkerTerminate): + asynloop(*x.args) + finally: + state.should_terminate = False + + def test_should_terminate_hub_close_raises(self): + x = X(self.app) + # XXX why aren't the errors propagated?!? + state.should_terminate = True + x.hub.close.side_effect = MemoryError() + try: + with self.assertRaises(WorkerTerminate): + asynloop(*x.args) + finally: + state.should_terminate = False + + def test_should_stop(self): + x = X(self.app) + state.should_stop = True + try: + with self.assertRaises(WorkerShutdown): + asynloop(*x.args) + finally: + state.should_stop = False + + def test_updates_qos(self): + x = X(self.app) + x.qos.prev = 3 + x.qos.value = 3 + x.hub.on_tick.add(x.closer(mod=2)) + x.hub.timer._queue = [1] + asynloop(*x.args) + self.assertFalse(x.qos.update.called) + + x = X(self.app) + x.qos.prev = 1 + x.qos.value = 6 + x.hub.on_tick.add(x.closer(mod=2)) + asynloop(*x.args) + x.qos.update.assert_called_with() + x.hub.fire_timers.assert_called_with(propagate=(socket.error, )) + + def test_poll_empty(self): + x = X(self.app) + x.hub.readers = {6: Mock()} + x.hub.timer._queue = [1] + x.close_then_error(x.hub.poller.poll) + x.hub.fire_timers.return_value = 33.37 + poller = x.hub.poller + poller.poll.return_value = [] + with self.assertRaises(socket.error): + asynloop(*x.args) + poller.poll.assert_called_with(33.37) + + def test_poll_readable(self): + x = X(self.app) + reader = Mock(name='reader') + x.hub.add_reader(6, reader, 6) + x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), mod=4)) + poller = x.hub.poller + poller.poll.return_value = [(6, READ)] + with self.assertRaises(socket.error): + asynloop(*x.args) + reader.assert_called_with(6) + self.assertTrue(poller.poll.called) + + def test_poll_readable_raises_Empty(self): + x = X(self.app) + reader = Mock(name='reader') + x.hub.add_reader(6, reader, 6) + x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) + poller = x.hub.poller + poller.poll.return_value = [(6, READ)] + reader.side_effect = Empty() + with self.assertRaises(socket.error): + asynloop(*x.args) + reader.assert_called_with(6) + self.assertTrue(poller.poll.called) + + def test_poll_writable(self): + x = X(self.app) + writer = Mock(name='writer') + x.hub.add_writer(6, writer, 6) + x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) + poller = x.hub.poller + poller.poll.return_value = [(6, WRITE)] + with self.assertRaises(socket.error): + asynloop(*x.args) + writer.assert_called_with(6) + self.assertTrue(poller.poll.called) + + def test_poll_writable_none_registered(self): + x = X(self.app) + writer = Mock(name='writer') + x.hub.add_writer(6, writer, 6) + x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) + poller = x.hub.poller + poller.poll.return_value = [(7, WRITE)] + with self.assertRaises(socket.error): + asynloop(*x.args) + self.assertTrue(poller.poll.called) + + def test_poll_unknown_event(self): + x = X(self.app) + writer = Mock(name='reader') + x.hub.add_writer(6, writer, 6) + x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) + poller = x.hub.poller + poller.poll.return_value = [(6, 0)] + with self.assertRaises(socket.error): + asynloop(*x.args) + self.assertTrue(poller.poll.called) + + def test_poll_keep_draining_disabled(self): + x = X(self.app) + x.hub.writers = {6: Mock()} + poll = x.hub.poller.poll + + def se(*args, **kwargs): + poll.side_effect = socket.error() + poll.side_effect = se + + poller = x.hub.poller + poll.return_value = [(6, 0)] + with 
self.assertRaises(socket.error): + asynloop(*x.args) + self.assertTrue(poller.poll.called) + + def test_poll_err_writable(self): + x = X(self.app) + writer = Mock(name='writer') + x.hub.add_writer(6, writer, 6, 48) + x.hub.on_tick.add(x.close_then_error(Mock(), 2)) + poller = x.hub.poller + poller.poll.return_value = [(6, ERR)] + with self.assertRaises(socket.error): + asynloop(*x.args) + writer.assert_called_with(6, 48) + self.assertTrue(poller.poll.called) + + def test_poll_write_generator(self): + x = X(self.app) + x.hub.remove = Mock(name='hub.remove()') + + def Gen(): + yield 1 + yield 2 + gen = Gen() + + x.hub.add_writer(6, gen) + x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) + x.hub.poller.poll.return_value = [(6, WRITE)] + with self.assertRaises(socket.error): + asynloop(*x.args) + self.assertTrue(gen.gi_frame.f_lasti != -1) + self.assertFalse(x.hub.remove.called) + + def test_poll_write_generator_stopped(self): + x = X(self.app) + + def Gen(): + raise StopIteration() + yield + gen = Gen() + x.hub.add_writer(6, gen) + x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) + x.hub.poller.poll.return_value = [(6, WRITE)] + x.hub.remove = Mock(name='hub.remove()') + with self.assertRaises(socket.error): + asynloop(*x.args) + self.assertIsNone(gen.gi_frame) + + def test_poll_write_generator_raises(self): + x = X(self.app) + + def Gen(): + raise ValueError('foo') + yield + gen = Gen() + x.hub.add_writer(6, gen) + x.hub.remove = Mock(name='hub.remove()') + x.hub.on_tick.add(x.close_then_error(Mock(name='tick'), 2)) + x.hub.poller.poll.return_value = [(6, WRITE)] + with self.assertRaises(ValueError): + asynloop(*x.args) + self.assertIsNone(gen.gi_frame) + x.hub.remove.assert_called_with(6) + + def test_poll_err_readable(self): + x = X(self.app) + reader = Mock(name='reader') + x.hub.add_reader(6, reader, 6, 24) + x.hub.on_tick.add(x.close_then_error(Mock(), 2)) + poller = x.hub.poller + poller.poll.return_value = [(6, ERR)] + with self.assertRaises(socket.error): + asynloop(*x.args) + reader.assert_called_with(6, 24) + self.assertTrue(poller.poll.called) + + def test_poll_raises_ValueError(self): + x = X(self.app) + x.hub.readers = {6: Mock()} + poller = x.hub.poller + x.close_then_error(poller.poll, exc=ValueError) + asynloop(*x.args) + self.assertTrue(poller.poll.called) + + +class test_synloop(AppCase): + + def test_timeout_ignored(self): + x = X(self.app) + x.timeout_then_error(x.connection.drain_events) + with self.assertRaises(socket.error): + synloop(*x.args) + self.assertEqual(x.connection.drain_events.call_count, 2) + + def test_updates_qos_when_changed(self): + x = X(self.app) + x.qos.prev = 2 + x.qos.value = 2 + x.timeout_then_error(x.connection.drain_events) + with self.assertRaises(socket.error): + synloop(*x.args) + self.assertFalse(x.qos.update.called) + + x.qos.value = 4 + x.timeout_then_error(x.connection.drain_events) + with self.assertRaises(socket.error): + synloop(*x.args) + x.qos.update.assert_called_with() + + def test_ignores_socket_errors_when_closed(self): + x = X(self.app) + x.close_then_error(x.connection.drain_events) + self.assertIsNone(synloop(*x.args)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_request.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_request.py new file mode 100644 index 0000000..16efcd7 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_request.py @@ -0,0 +1,969 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import, 
unicode_literals + +import anyjson +import os +import signal +import socket +import sys + +from datetime import datetime, timedelta + +from billiard.einfo import ExceptionInfo +from kombu.transport.base import Message +from kombu.utils.encoding import from_utf8, default_encode + +from celery import states +from celery.app.trace import ( + trace_task, + _trace_task_ret, + TraceInfo, + mro_lookup, + build_tracer, + setup_worker_optimizations, + reset_worker_optimizations, +) +from celery.concurrency.base import BasePool +from celery.exceptions import ( + Ignore, + InvalidTaskError, + Retry, + TaskRevokedError, + Terminated, + WorkerLostError, +) +from celery.five import keys, monotonic +from celery.signals import task_revoked +from celery.utils import uuid +from celery.worker import job as module +from celery.worker.job import Request, logger as req_logger +from celery.worker.state import revoked + +from celery.tests.case import ( + AppCase, + Case, + Mock, + SkipTest, + assert_signal_called, + body_from_sig, + patch, +) + + +class test_mro_lookup(Case): + + def test_order(self): + + class A(object): + pass + + class B(A): + pass + + class C(B): + pass + + class D(C): + + @classmethod + def mro(cls): + return () + + A.x = 10 + self.assertEqual(mro_lookup(C, 'x'), A) + self.assertIsNone(mro_lookup(C, 'x', stop=(A, ))) + B.x = 10 + self.assertEqual(mro_lookup(C, 'x'), B) + C.x = 10 + self.assertEqual(mro_lookup(C, 'x'), C) + self.assertIsNone(mro_lookup(D, 'x')) + + +def jail(app, task_id, name, args, kwargs): + request = {'id': task_id} + task = app.tasks[name] + task.__trace__ = None # rebuild + return trace_task( + task, task_id, args, kwargs, request=request, eager=False, app=app, + ) + + +class test_default_encode(AppCase): + + def setup(self): + if sys.version_info >= (3, 0): + raise SkipTest('py3k: not relevant') + + def test_jython(self): + prev, sys.platform = sys.platform, 'java 1.6.1' + try: + self.assertEqual(default_encode(bytes('foo')), 'foo') + finally: + sys.platform = prev + + def test_cpython(self): + prev, sys.platform = sys.platform, 'darwin' + gfe, sys.getfilesystemencoding = ( + sys.getfilesystemencoding, + lambda: 'utf-8', + ) + try: + self.assertEqual(default_encode(bytes('foo')), 'foo') + finally: + sys.platform = prev + sys.getfilesystemencoding = gfe + + +class test_Retry(AppCase): + + def test_retry_semipredicate(self): + try: + raise Exception('foo') + except Exception as exc: + ret = Retry('Retrying task', exc) + self.assertEqual(ret.exc, exc) + + +class test_trace_task(AppCase): + + def setup(self): + + @self.app.task(shared=False) + def mytask(i, **kwargs): + return i ** i + self.mytask = mytask + + @self.app.task(shared=False) + def mytask_raising(i): + raise KeyError(i) + self.mytask_raising = mytask_raising + + @patch('celery.app.trace._logger') + def test_process_cleanup_fails(self, _logger): + self.mytask.backend = Mock() + self.mytask.backend.process_cleanup = Mock(side_effect=KeyError()) + tid = uuid() + ret = jail(self.app, tid, self.mytask.name, [2], {}) + self.assertEqual(ret, 4) + self.assertTrue(self.mytask.backend.store_result.called) + self.assertIn('Process cleanup failed', _logger.error.call_args[0][0]) + + def test_process_cleanup_BaseException(self): + self.mytask.backend = Mock() + self.mytask.backend.process_cleanup = Mock(side_effect=SystemExit()) + with self.assertRaises(SystemExit): + jail(self.app, uuid(), self.mytask.name, [2], {}) + + def test_execute_jail_success(self): + ret = jail(self.app, uuid(), self.mytask.name, [2], {}) + 
self.assertEqual(ret, 4) + + def test_marked_as_started(self): + _started = [] + + def store_result(tid, meta, state, **kwars): + if state == states.STARTED: + _started.append(tid) + self.mytask.backend.store_result = Mock(name='store_result') + self.mytask.backend.store_result.side_effect = store_result + self.mytask.track_started = True + + tid = uuid() + jail(self.app, tid, self.mytask.name, [2], {}) + self.assertIn(tid, _started) + + self.mytask.ignore_result = True + tid = uuid() + jail(self.app, tid, self.mytask.name, [2], {}) + self.assertNotIn(tid, _started) + + def test_execute_jail_failure(self): + ret = jail( + self.app, uuid(), self.mytask_raising.name, [4], {}, + ) + self.assertIsInstance(ret, ExceptionInfo) + self.assertTupleEqual(ret.exception.args, (4, )) + + def test_execute_ignore_result(self): + + @self.app.task(shared=False, ignore_result=True) + def ignores_result(i): + return i ** i + + task_id = uuid() + ret = jail(self.app, task_id, ignores_result.name, [4], {}) + self.assertEqual(ret, 256) + self.assertFalse(self.app.AsyncResult(task_id).ready()) + + +class MockEventDispatcher(object): + + def __init__(self): + self.sent = [] + self.enabled = True + + def send(self, event, **fields): + self.sent.append(event) + + +class test_Request(AppCase): + + def setup(self): + + @self.app.task(shared=False) + def add(x, y, **kw_): + return x + y + self.add = add + + @self.app.task(shared=False) + def mytask(i, **kwargs): + return i ** i + self.mytask = mytask + + @self.app.task(shared=False) + def mytask_raising(i): + raise KeyError(i) + self.mytask_raising = mytask_raising + + def get_request(self, sig, Request=Request, **kwargs): + return Request( + body_from_sig(self.app, sig), + on_ack=Mock(), + eventer=Mock(), + app=self.app, + connection_errors=(socket.error, ), + task=sig.type, + **kwargs + ) + + def test_invalid_eta_raises_InvalidTaskError(self): + with self.assertRaises(InvalidTaskError): + self.get_request(self.add.s(2, 2).set(eta='12345')) + + def test_invalid_expires_raises_InvalidTaskError(self): + with self.assertRaises(InvalidTaskError): + self.get_request(self.add.s(2, 2).set(expires='12345')) + + def test_valid_expires_with_utc_makes_aware(self): + with patch('celery.worker.job.maybe_make_aware') as mma: + self.get_request(self.add.s(2, 2).set(expires=10)) + self.assertTrue(mma.called) + + def test_maybe_expire_when_expires_is_None(self): + req = self.get_request(self.add.s(2, 2)) + self.assertFalse(req.maybe_expire()) + + def test_on_retry_acks_if_late(self): + self.add.acks_late = True + req = self.get_request(self.add.s(2, 2)) + req.on_retry(Mock()) + req.on_ack.assert_called_with(req_logger, req.connection_errors) + + def test_on_failure_Termianted(self): + einfo = None + try: + raise Terminated('9') + except Terminated: + einfo = ExceptionInfo() + self.assertIsNotNone(einfo) + req = self.get_request(self.add.s(2, 2)) + req.on_failure(einfo) + req.eventer.send.assert_called_with( + 'task-revoked', + uuid=req.id, terminated=True, signum='9', expired=False, + ) + + def test_log_error_propagates_MemoryError(self): + einfo = None + try: + raise MemoryError() + except MemoryError: + einfo = ExceptionInfo(internal=True) + self.assertIsNotNone(einfo) + req = self.get_request(self.add.s(2, 2)) + with self.assertRaises(MemoryError): + req._log_error(einfo) + + def test_log_error_when_Ignore(self): + einfo = None + try: + raise Ignore() + except Ignore: + einfo = ExceptionInfo(internal=True) + self.assertIsNotNone(einfo) + req = self.get_request(self.add.s(2, 2)) + 
req._log_error(einfo) + req.on_ack.assert_called_with(req_logger, req.connection_errors) + + def test_tzlocal_is_cached(self): + req = self.get_request(self.add.s(2, 2)) + req._tzlocal = 'foo' + self.assertEqual(req.tzlocal, 'foo') + + def test_execute_magic_kwargs(self): + task = self.add.s(2, 2) + task.freeze() + req = self.get_request(task) + self.add.accept_magic_kwargs = True + pool = Mock() + req.execute_using_pool(pool) + self.assertTrue(pool.apply_async.called) + args = pool.apply_async.call_args[1]['args'] + self.assertEqual(args[0], task.task) + self.assertEqual(args[1], task.id) + self.assertEqual(args[2], task.args) + kwargs = args[3] + self.assertEqual(kwargs.get('task_name'), task.task) + + def xRequest(self, body=None, **kwargs): + body = dict({'task': self.mytask.name, + 'id': uuid(), + 'args': [1], + 'kwargs': {'f': 'x'}}, **body or {}) + return Request(body, app=self.app, **kwargs) + + def test_task_wrapper_repr(self): + self.assertTrue(repr(self.xRequest())) + + @patch('celery.worker.job.kwdict') + def test_kwdict(self, kwdict): + prev, module.NEEDS_KWDICT = module.NEEDS_KWDICT, True + try: + self.xRequest() + self.assertTrue(kwdict.called) + finally: + module.NEEDS_KWDICT = prev + + def test_sets_store_errors(self): + self.mytask.ignore_result = True + job = self.xRequest() + self.assertFalse(job.store_errors) + + self.mytask.store_errors_even_if_ignored = True + job = self.xRequest() + self.assertTrue(job.store_errors) + + def test_send_event(self): + job = self.xRequest() + job.eventer = MockEventDispatcher() + job.send_event('task-frobulated') + self.assertIn('task-frobulated', job.eventer.sent) + + def test_send_events__disabled_at_task_level(self): + job = self.xRequest() + job.task.send_events = False + job.eventer = Mock(name='.eventer') + job.send_event('task-frobulated') + job.eventer.send.assert_not_called() + + def test_on_retry(self): + job = Request({ + 'task': self.mytask.name, + 'id': uuid(), + 'args': [1], + 'kwargs': {'f': 'x'}, + }, app=self.app) + job.eventer = MockEventDispatcher() + try: + raise Retry('foo', KeyError('moofoobar')) + except: + einfo = ExceptionInfo() + job.on_failure(einfo) + self.assertIn('task-retried', job.eventer.sent) + prev, module._does_info = module._does_info, False + try: + job.on_failure(einfo) + finally: + module._does_info = prev + einfo.internal = True + job.on_failure(einfo) + + def test_compat_properties(self): + job = Request({ + 'task': self.mytask.name, + 'id': uuid(), + 'args': [1], + 'kwargs': {'f': 'x'}, + }, app=self.app) + self.assertEqual(job.task_id, job.id) + self.assertEqual(job.task_name, job.name) + job.task_id = 'ID' + self.assertEqual(job.id, 'ID') + job.task_name = 'NAME' + self.assertEqual(job.name, 'NAME') + + def test_terminate__task_started(self): + pool = Mock() + signum = signal.SIGTERM + job = Request({ + 'task': self.mytask.name, + 'id': uuid(), + 'args': [1], + 'kwrgs': {'f': 'x'}, + }, app=self.app) + with assert_signal_called( + task_revoked, sender=job.task, request=job, + terminated=True, expired=False, signum=signum): + job.time_start = monotonic() + job.worker_pid = 313 + job.terminate(pool, signal='TERM') + pool.terminate_job.assert_called_with(job.worker_pid, signum) + + def test_terminate__task_reserved(self): + pool = Mock() + job = Request({ + 'task': self.mytask.name, + 'id': uuid(), + 'args': [1], + 'kwargs': {'f': 'x'}, + }, app=self.app) + job.time_start = None + job.terminate(pool, signal='TERM') + self.assertFalse(pool.terminate_job.called) + 
self.assertTupleEqual(job._terminate_on_ack, (pool, 15)) + job.terminate(pool, signal='TERM') + + def test_revoked_expires_expired(self): + job = Request({ + 'task': self.mytask.name, + 'id': uuid(), + 'args': [1], + 'kwargs': {'f': 'x'}, + 'expires': datetime.utcnow() - timedelta(days=1), + }, app=self.app) + with assert_signal_called( + task_revoked, sender=job.task, request=job, + terminated=False, expired=True, signum=None): + job.revoked() + self.assertIn(job.id, revoked) + self.assertEqual( + self.mytask.backend.get_status(job.id), + states.REVOKED, + ) + + def test_revoked_expires_not_expired(self): + job = self.xRequest({ + 'expires': datetime.utcnow() + timedelta(days=1), + }) + job.revoked() + self.assertNotIn(job.id, revoked) + self.assertNotEqual( + self.mytask.backend.get_status(job.id), + states.REVOKED, + ) + + def test_revoked_expires_ignore_result(self): + self.mytask.ignore_result = True + job = self.xRequest({ + 'expires': datetime.utcnow() - timedelta(days=1), + }) + job.revoked() + self.assertIn(job.id, revoked) + self.assertNotEqual( + self.mytask.backend.get_status(job.id), states.REVOKED, + ) + + def test_send_email(self): + app = self.app + mail_sent = [False] + + def mock_mail_admins(*args, **kwargs): + mail_sent[0] = True + + def get_ei(): + try: + raise KeyError('moofoobar') + except: + return ExceptionInfo() + + app.mail_admins = mock_mail_admins + self.mytask.send_error_emails = True + job = self.xRequest() + einfo = get_ei() + job.on_failure(einfo) + self.assertTrue(mail_sent[0]) + + einfo = get_ei() + mail_sent[0] = False + self.mytask.send_error_emails = False + job.on_failure(einfo) + self.assertFalse(mail_sent[0]) + + einfo = get_ei() + mail_sent[0] = False + self.mytask.send_error_emails = True + job.on_failure(einfo) + self.assertTrue(mail_sent[0]) + + def test_already_revoked(self): + job = self.xRequest() + job._already_revoked = True + self.assertTrue(job.revoked()) + + def test_revoked(self): + job = self.xRequest() + with assert_signal_called( + task_revoked, sender=job.task, request=job, + terminated=False, expired=False, signum=None): + revoked.add(job.id) + self.assertTrue(job.revoked()) + self.assertTrue(job._already_revoked) + self.assertTrue(job.acknowledged) + + def test_execute_does_not_execute_revoked(self): + job = self.xRequest() + revoked.add(job.id) + job.execute() + + def test_execute_acks_late(self): + self.mytask_raising.acks_late = True + job = self.xRequest({ + 'task': self.mytask_raising.name, + 'kwargs': {}, + }) + job.execute() + self.assertTrue(job.acknowledged) + job.execute() + + def test_execute_using_pool_does_not_execute_revoked(self): + job = self.xRequest() + revoked.add(job.id) + with self.assertRaises(TaskRevokedError): + job.execute_using_pool(None) + + def test_on_accepted_acks_early(self): + job = self.xRequest() + job.on_accepted(pid=os.getpid(), time_accepted=monotonic()) + self.assertTrue(job.acknowledged) + prev, module._does_debug = module._does_debug, False + try: + job.on_accepted(pid=os.getpid(), time_accepted=monotonic()) + finally: + module._does_debug = prev + + def test_on_accepted_acks_late(self): + job = self.xRequest() + self.mytask.acks_late = True + job.on_accepted(pid=os.getpid(), time_accepted=monotonic()) + self.assertFalse(job.acknowledged) + + def test_on_accepted_terminates(self): + signum = signal.SIGTERM + pool = Mock() + job = self.xRequest() + with assert_signal_called( + task_revoked, sender=job.task, request=job, + terminated=True, expired=False, signum=signum): + job.terminate(pool, 
signal='TERM') + self.assertFalse(pool.terminate_job.call_count) + job.on_accepted(pid=314, time_accepted=monotonic()) + pool.terminate_job.assert_called_with(314, signum) + + def test_on_success_acks_early(self): + job = self.xRequest() + job.time_start = 1 + job.on_success(42) + prev, module._does_info = module._does_info, False + try: + job.on_success(42) + self.assertFalse(job.acknowledged) + finally: + module._does_info = prev + + def test_on_success_BaseException(self): + job = self.xRequest() + job.time_start = 1 + with self.assertRaises(SystemExit): + try: + raise SystemExit() + except SystemExit: + job.on_success(ExceptionInfo()) + else: + assert False + + def test_on_success_eventer(self): + job = self.xRequest() + job.time_start = 1 + job.eventer = Mock() + job.eventer.send = Mock() + job.on_success(42) + self.assertTrue(job.eventer.send.called) + + def test_on_success_when_failure(self): + job = self.xRequest() + job.time_start = 1 + job.on_failure = Mock() + try: + raise KeyError('foo') + except Exception: + job.on_success(ExceptionInfo()) + self.assertTrue(job.on_failure.called) + + def test_on_success_acks_late(self): + job = self.xRequest() + job.time_start = 1 + self.mytask.acks_late = True + job.on_success(42) + self.assertTrue(job.acknowledged) + + def test_on_failure_WorkerLostError(self): + + def get_ei(): + try: + raise WorkerLostError('do re mi') + except WorkerLostError: + return ExceptionInfo() + + job = self.xRequest() + exc_info = get_ei() + job.on_failure(exc_info) + self.assertEqual( + self.mytask.backend.get_status(job.id), states.FAILURE, + ) + + self.mytask.ignore_result = True + exc_info = get_ei() + job = self.xRequest() + job.on_failure(exc_info) + self.assertEqual( + self.mytask.backend.get_status(job.id), states.PENDING, + ) + + def test_on_failure_acks_late(self): + job = self.xRequest() + job.time_start = 1 + self.mytask.acks_late = True + try: + raise KeyError('foo') + except KeyError: + exc_info = ExceptionInfo() + job.on_failure(exc_info) + self.assertTrue(job.acknowledged) + + def test_from_message_invalid_kwargs(self): + body = dict(task=self.mytask.name, id=1, args=(), kwargs='foo') + with self.assertRaises(InvalidTaskError): + Request(body, message=None, app=self.app) + + @patch('celery.worker.job.error') + @patch('celery.worker.job.warn') + def test_on_timeout(self, warn, error): + + job = self.xRequest() + job.on_timeout(soft=True, timeout=1337) + self.assertIn('Soft time limit', warn.call_args[0][0]) + job.on_timeout(soft=False, timeout=1337) + self.assertIn('Hard time limit', error.call_args[0][0]) + self.assertEqual( + self.mytask.backend.get_status(job.id), states.FAILURE, + ) + + self.mytask.ignore_result = True + job = self.xRequest() + job.on_timeout(soft=True, timeout=1336) + self.assertEqual( + self.mytask.backend.get_status(job.id), states.PENDING, + ) + + def test_fast_trace_task(self): + from celery.app import trace + setup_worker_optimizations(self.app) + self.assertIs(trace.trace_task_ret, trace._fast_trace_task) + try: + self.mytask.__trace__ = build_tracer( + self.mytask.name, self.mytask, self.app.loader, 'test', + app=self.app, + ) + res = trace.trace_task_ret(self.mytask.name, uuid(), [4], {}) + self.assertEqual(res, 4 ** 4) + finally: + reset_worker_optimizations() + self.assertIs(trace.trace_task_ret, trace._trace_task_ret) + delattr(self.mytask, '__trace__') + res = trace.trace_task_ret( + self.mytask.name, uuid(), [4], {}, app=self.app, + ) + self.assertEqual(res, 4 ** 4) + + def test_trace_task_ret(self): + 
self.mytask.__trace__ = build_tracer( + self.mytask.name, self.mytask, self.app.loader, 'test', + app=self.app, + ) + res = _trace_task_ret(self.mytask.name, uuid(), [4], {}, app=self.app) + self.assertEqual(res, 4 ** 4) + + def test_trace_task_ret__no_trace(self): + try: + delattr(self.mytask, '__trace__') + except AttributeError: + pass + res = _trace_task_ret(self.mytask.name, uuid(), [4], {}, app=self.app) + self.assertEqual(res, 4 ** 4) + + def test_trace_catches_exception(self): + + def _error_exec(self, *args, **kwargs): + raise KeyError('baz') + + @self.app.task(request=None, shared=False) + def raising(): + raise KeyError('baz') + + with self.assertWarnsRegex(RuntimeWarning, + r'Exception raised outside'): + res = trace_task(raising, uuid(), [], {}, app=self.app) + self.assertIsInstance(res, ExceptionInfo) + + def test_worker_task_trace_handle_retry(self): + tid = uuid() + self.mytask.push_request(id=tid) + try: + raise ValueError('foo') + except Exception as exc: + try: + raise Retry(str(exc), exc=exc) + except Retry as exc: + w = TraceInfo(states.RETRY, exc) + w.handle_retry(self.mytask, store_errors=False) + self.assertEqual( + self.mytask.backend.get_status(tid), states.PENDING, + ) + w.handle_retry(self.mytask, store_errors=True) + self.assertEqual( + self.mytask.backend.get_status(tid), states.RETRY, + ) + finally: + self.mytask.pop_request() + + def test_worker_task_trace_handle_failure(self): + tid = uuid() + self.mytask.push_request() + try: + self.mytask.request.id = tid + try: + raise ValueError('foo') + except Exception as exc: + w = TraceInfo(states.FAILURE, exc) + w.handle_failure(self.mytask, store_errors=False) + self.assertEqual( + self.mytask.backend.get_status(tid), states.PENDING, + ) + w.handle_failure(self.mytask, store_errors=True) + self.assertEqual( + self.mytask.backend.get_status(tid), states.FAILURE, + ) + finally: + self.mytask.pop_request() + + def test_task_wrapper_mail_attrs(self): + job = self.xRequest({'args': [], 'kwargs': {}}) + x = job.success_msg % { + 'name': job.name, + 'id': job.id, + 'return_value': 10, + 'runtime': 0.3641, + } + self.assertTrue(x) + x = job.error_msg % { + 'name': job.name, + 'id': job.id, + 'exc': 'FOOBARBAZ', + 'description': 'raised unexpected', + 'traceback': 'foobarbaz', + } + self.assertTrue(x) + + def test_from_message(self): + us = 'æØåveéðƒeæ' + body = {'task': self.mytask.name, 'id': uuid(), + 'args': [2], 'kwargs': {us: 'bar'}} + m = Message(None, body=anyjson.dumps(body), backend='foo', + content_type='application/json', + content_encoding='utf-8') + job = Request(m.decode(), message=m, app=self.app) + self.assertIsInstance(job, Request) + self.assertEqual(job.name, body['task']) + self.assertEqual(job.id, body['id']) + self.assertEqual(job.args, body['args']) + us = from_utf8(us) + if sys.version_info < (2, 6): + self.assertEqual(next(keys(job.kwargs)), us) + self.assertIsInstance(next(keys(job.kwargs)), str) + + def test_from_message_empty_args(self): + body = {'task': self.mytask.name, 'id': uuid()} + m = Message(None, body=anyjson.dumps(body), backend='foo', + content_type='application/json', + content_encoding='utf-8') + job = Request(m.decode(), message=m, app=self.app) + self.assertIsInstance(job, Request) + self.assertEqual(job.args, []) + self.assertEqual(job.kwargs, {}) + + def test_from_message_missing_required_fields(self): + body = {} + m = Message(None, body=anyjson.dumps(body), backend='foo', + content_type='application/json', + content_encoding='utf-8') + with self.assertRaises(KeyError): + 
Request(m.decode(), message=m, app=self.app) + + def test_from_message_nonexistant_task(self): + body = {'task': 'cu.mytask.doesnotexist', 'id': uuid(), + 'args': [2], 'kwargs': {'æØåveéðƒeæ': 'bar'}} + m = Message(None, body=anyjson.dumps(body), backend='foo', + content_type='application/json', + content_encoding='utf-8') + with self.assertRaises(KeyError): + Request(m.decode(), message=m, app=self.app) + + def test_execute(self): + tid = uuid() + job = self.xRequest({'id': tid, 'args': [4], 'kwargs': {}}) + self.assertEqual(job.execute(), 256) + meta = self.mytask.backend.get_task_meta(tid) + self.assertEqual(meta['status'], states.SUCCESS) + self.assertEqual(meta['result'], 256) + + def test_execute_success_no_kwargs(self): + + @self.app.task # traverses coverage for decorator without parens + def mytask_no_kwargs(i): + return i ** i + + tid = uuid() + job = self.xRequest({ + 'task': mytask_no_kwargs.name, + 'id': tid, + 'args': [4], + 'kwargs': {}, + }) + self.assertEqual(job.execute(), 256) + meta = mytask_no_kwargs.backend.get_task_meta(tid) + self.assertEqual(meta['result'], 256) + self.assertEqual(meta['status'], states.SUCCESS) + + def test_execute_success_some_kwargs(self): + scratch = {'task_id': None} + + @self.app.task(shared=False, accept_magic_kwargs=True) + def mytask_some_kwargs(i, task_id): + scratch['task_id'] = task_id + return i ** i + + tid = uuid() + job = self.xRequest({ + 'task': mytask_some_kwargs.name, + 'id': tid, + 'args': [4], + 'kwargs': {}, + }) + self.assertEqual(job.execute(), 256) + meta = mytask_some_kwargs.backend.get_task_meta(tid) + self.assertEqual(scratch.get('task_id'), tid) + self.assertEqual(meta['result'], 256) + self.assertEqual(meta['status'], states.SUCCESS) + + def test_execute_ack(self): + scratch = {'ACK': False} + + def on_ack(*args, **kwargs): + scratch['ACK'] = True + + tid = uuid() + job = self.xRequest({'id': tid, 'args': [4]}, on_ack=on_ack) + self.assertEqual(job.execute(), 256) + meta = self.mytask.backend.get_task_meta(tid) + self.assertTrue(scratch['ACK']) + self.assertEqual(meta['result'], 256) + self.assertEqual(meta['status'], states.SUCCESS) + + def test_execute_fail(self): + tid = uuid() + job = self.xRequest({ + 'task': self.mytask_raising.name, + 'id': tid, + 'args': [4], + 'kwargs': {}, + }) + self.assertIsInstance(job.execute(), ExceptionInfo) + meta = self.mytask_raising.backend.get_task_meta(tid) + self.assertEqual(meta['status'], states.FAILURE) + self.assertIsInstance(meta['result'], KeyError) + + def test_execute_using_pool(self): + tid = uuid() + job = self.xRequest({'id': tid, 'args': [4]}) + + class MockPool(BasePool): + target = None + args = None + kwargs = None + + def __init__(self, *args, **kwargs): + pass + + def apply_async(self, target, args=None, kwargs=None, + *margs, **mkwargs): + self.target = target + self.args = args + self.kwargs = kwargs + + p = MockPool() + job.execute_using_pool(p) + self.assertTrue(p.target) + self.assertEqual(p.args[0], self.mytask.name) + self.assertEqual(p.args[1], tid) + self.assertEqual(p.args[2], [4]) + self.assertIn('f', p.args[3]) + self.assertIn([4], p.args) + + job.task.accept_magic_kwargs = False + job.execute_using_pool(p) + + def test_default_kwargs(self): + self.maxDiff = 3000 + tid = uuid() + job = self.xRequest({'id': tid, 'args': [4]}) + self.assertDictEqual( + job.extend_with_default_kwargs(), { + 'f': 'x', + 'logfile': None, + 'loglevel': None, + 'task_id': job.id, + 'task_retries': 0, + 'task_is_eager': False, + 'delivery_info': { + 'exchange': None, + 
'routing_key': None, + 'priority': 0, + 'redelivered': False, + }, + 'task_name': job.name}) + + @patch('celery.worker.job.logger') + def _test_on_failure(self, exception, logger): + app = self.app + tid = uuid() + job = self.xRequest({'id': tid, 'args': [4]}) + try: + raise exception + except Exception: + exc_info = ExceptionInfo() + app.conf.CELERY_SEND_TASK_ERROR_EMAILS = True + job.on_failure(exc_info) + self.assertTrue(logger.log.called) + context = logger.log.call_args[0][2] + self.assertEqual(self.mytask.name, context['name']) + self.assertIn(tid, context['id']) + + def test_on_failure(self): + self._test_on_failure(Exception('Inside unit tests')) + + def test_on_failure_unicode_exception(self): + self._test_on_failure(Exception('Бобры атакуют')) + + def test_on_failure_utf8_exception(self): + self._test_on_failure(Exception( + from_utf8('Бобры атакуют'))) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_revoke.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_revoke.py new file mode 100644 index 0000000..4d5ad02 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_revoke.py @@ -0,0 +1,13 @@ +from __future__ import absolute_import + +from celery.worker import state +from celery.tests.case import AppCase + + +class test_revoked(AppCase): + + def test_is_working(self): + state.revoked.add('foo') + self.assertIn('foo', state.revoked) + state.revoked.pop_value('foo') + self.assertNotIn('foo', state.revoked) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_state.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_state.py new file mode 100644 index 0000000..ede9a00 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_state.py @@ -0,0 +1,161 @@ +from __future__ import absolute_import + +import pickle + +from time import time + +from celery.datastructures import LimitedSet +from celery.exceptions import WorkerShutdown, WorkerTerminate +from celery.worker import state + +from celery.tests.case import AppCase, Mock, patch + + +class StateResetCase(AppCase): + + def setup(self): + self.reset_state() + + def teardown(self): + self.reset_state() + + def reset_state(self): + state.active_requests.clear() + state.revoked.clear() + state.total_count.clear() + + +class MockShelve(dict): + filename = None + in_sync = False + closed = False + + def open(self, filename, **kwargs): + self.filename = filename + return self + + def sync(self): + self.in_sync = True + + def close(self): + self.closed = True + + +class MyPersistent(state.Persistent): + storage = MockShelve() + + +class test_maybe_shutdown(AppCase): + + def teardown(self): + state.should_stop = False + state.should_terminate = False + + def test_should_stop(self): + state.should_stop = True + with self.assertRaises(WorkerShutdown): + state.maybe_shutdown() + + def test_should_terminate(self): + state.should_terminate = True + with self.assertRaises(WorkerTerminate): + state.maybe_shutdown() + + +class test_Persistent(StateResetCase): + + def setup(self): + self.reset_state() + self.p = MyPersistent(state, filename='celery-state') + + def test_close_twice(self): + self.p._is_open = False + self.p.close() + + def test_constructor(self): + self.assertDictEqual(self.p.db, {}) + self.assertEqual(self.p.db.filename, self.p.filename) + + def test_save(self): + self.p.db['foo'] = 'bar' + self.p.save() + self.assertTrue(self.p.db.in_sync) + self.assertTrue(self.p.db.closed) + + def add_revoked(self, 
*ids): + for id in ids: + self.p.db.setdefault('revoked', LimitedSet()).add(id) + + def test_merge(self, data=['foo', 'bar', 'baz']): + self.add_revoked(*data) + self.p.merge() + for item in data: + self.assertIn(item, state.revoked) + + def test_merge_dict(self): + self.p.clock = Mock() + self.p.clock.adjust.return_value = 626 + d = {'revoked': {'abc': time()}, 'clock': 313} + self.p._merge_with(d) + self.p.clock.adjust.assert_called_with(313) + self.assertEqual(d['clock'], 626) + self.assertIn('abc', state.revoked) + + def test_sync_clock_and_purge(self): + passthrough = Mock() + passthrough.side_effect = lambda x: x + with patch('celery.worker.state.revoked') as revoked: + d = {'clock': 0} + self.p.clock = Mock() + self.p.clock.forward.return_value = 627 + self.p._dumps = passthrough + self.p.compress = passthrough + self.p._sync_with(d) + revoked.purge.assert_called_with() + self.assertEqual(d['clock'], 627) + self.assertNotIn('revoked', d) + self.assertIs(d['zrevoked'], revoked) + + def test_sync(self, data1=['foo', 'bar', 'baz'], + data2=['baz', 'ini', 'koz']): + self.add_revoked(*data1) + for item in data2: + state.revoked.add(item) + self.p.sync() + + self.assertTrue(self.p.db['zrevoked']) + pickled = self.p.decompress(self.p.db['zrevoked']) + self.assertTrue(pickled) + saved = pickle.loads(pickled) + for item in data2: + self.assertIn(item, saved) + + +class SimpleReq(object): + + def __init__(self, name): + self.name = name + + +class test_state(StateResetCase): + + def test_accepted(self, requests=[SimpleReq('foo'), + SimpleReq('bar'), + SimpleReq('baz'), + SimpleReq('baz')]): + for request in requests: + state.task_accepted(request) + for req in requests: + self.assertIn(req, state.active_requests) + self.assertEqual(state.total_count['foo'], 1) + self.assertEqual(state.total_count['bar'], 1) + self.assertEqual(state.total_count['baz'], 2) + + def test_ready(self, requests=[SimpleReq('foo'), + SimpleReq('bar')]): + for request in requests: + state.task_accepted(request) + self.assertEqual(len(state.active_requests), 2) + for request in requests: + state.task_ready(request) + self.assertEqual(len(state.active_requests), 0) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_strategy.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_strategy.py new file mode 100644 index 0000000..7edf78b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_strategy.py @@ -0,0 +1,139 @@ +from __future__ import absolute_import + +from collections import defaultdict +from contextlib import contextmanager + +from kombu.utils.limits import TokenBucket + +from celery.worker import state +from celery.utils.timeutils import rate + +from celery.tests.case import AppCase, Mock, patch, body_from_sig + + +class test_default_strategy(AppCase): + + def setup(self): + @self.app.task(shared=False) + def add(x, y): + return x + y + + self.add = add + + class Context(object): + + def __init__(self, sig, s, reserved, consumer, message, body): + self.sig = sig + self.s = s + self.reserved = reserved + self.consumer = consumer + self.message = message + self.body = body + + def __call__(self, **kwargs): + return self.s( + self.message, self.body, + self.message.ack, self.message.reject, [], **kwargs + ) + + def was_reserved(self): + return self.reserved.called + + def was_rate_limited(self): + assert not self.was_reserved() + return self.consumer._limit_task.called + + def was_scheduled(self): + assert not self.was_reserved() + assert not 
self.was_rate_limited() + return self.consumer.timer.call_at.called + + def event_sent(self): + return self.consumer.event_dispatcher.send.call_args + + def get_request(self): + if self.was_reserved(): + return self.reserved.call_args[0][0] + if self.was_rate_limited(): + return self.consumer._limit_task.call_args[0][0] + if self.was_scheduled(): + return self.consumer.timer.call_at.call_args[0][0] + raise ValueError('request not handled') + + @contextmanager + def _context(self, sig, + rate_limits=True, events=True, utc=True, limit=None): + self.assertTrue(sig.type.Strategy) + + reserved = Mock() + consumer = Mock() + consumer.task_buckets = defaultdict(lambda: None) + if limit: + bucket = TokenBucket(rate(limit), capacity=1) + consumer.task_buckets[sig.task] = bucket + consumer.disable_rate_limits = not rate_limits + consumer.event_dispatcher.enabled = events + s = sig.type.start_strategy(self.app, consumer, task_reserved=reserved) + self.assertTrue(s) + + message = Mock() + body = body_from_sig(self.app, sig, utc=utc) + + yield self.Context(sig, s, reserved, consumer, message, body) + + def test_when_logging_disabled(self): + with patch('celery.worker.strategy.logger') as logger: + logger.isEnabledFor.return_value = False + with self._context(self.add.s(2, 2)) as C: + C() + self.assertFalse(logger.info.called) + + def test_task_strategy(self): + with self._context(self.add.s(2, 2)) as C: + C() + self.assertTrue(C.was_reserved()) + req = C.get_request() + C.consumer.on_task_request.assert_called_with(req) + self.assertTrue(C.event_sent()) + + def test_when_events_disabled(self): + with self._context(self.add.s(2, 2), events=False) as C: + C() + self.assertTrue(C.was_reserved()) + self.assertFalse(C.event_sent()) + + def test_eta_task(self): + with self._context(self.add.s(2, 2).set(countdown=10)) as C: + C() + self.assertTrue(C.was_scheduled()) + C.consumer.qos.increment_eventually.assert_called_with() + + def test_eta_task_utc_disabled(self): + with self._context(self.add.s(2, 2).set(countdown=10), utc=False) as C: + C() + self.assertTrue(C.was_scheduled()) + C.consumer.qos.increment_eventually.assert_called_with() + + def test_when_rate_limited(self): + task = self.add.s(2, 2) + with self._context(task, rate_limits=True, limit='1/m') as C: + C() + self.assertTrue(C.was_rate_limited()) + + def test_when_rate_limited__limits_disabled(self): + task = self.add.s(2, 2) + with self._context(task, rate_limits=False, limit='1/m') as C: + C() + self.assertTrue(C.was_reserved()) + + def test_when_revoked(self): + task = self.add.s(2, 2) + task.freeze() + state.revoked.add(task.id) + try: + with self._context(task) as C: + C() + with self.assertRaises(ValueError): + C.get_request() + finally: + state.revoked.discard(task.id) diff --git a/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_worker.py b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_worker.py new file mode 100644 index 0000000..b700a6c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/tests/worker/test_worker.py @@ -0,0 +1,1128 @@ +from __future__ import absolute_import, print_function + +import os +import socket + +from collections import deque +from datetime import datetime, timedelta +from threading import Event + +from amqp import ChannelError +from kombu import Connection +from kombu.common import QoS, ignore_errors +from kombu.transport.base import Message + +from celery.app.defaults import DEFAULTS +from celery.bootsteps import RUN, CLOSE, StartStopStep +from celery.concurrency.base 
import BasePool +from celery.datastructures import AttributeDict +from celery.exceptions import ( + WorkerShutdown, WorkerTerminate, TaskRevokedError, +) +from celery.five import Empty, range, Queue as FastQueue +from celery.utils import uuid +from celery.worker import components +from celery.worker import consumer +from celery.worker.consumer import Consumer as __Consumer +from celery.worker.job import Request +from celery.utils import worker_direct +from celery.utils.serialization import pickle +from celery.utils.timer2 import Timer + +from celery.tests.case import AppCase, Mock, SkipTest, patch, restore_logging + + +def MockStep(step=None): + step = Mock() if step is None else step + step.blueprint = Mock() + step.blueprint.name = 'MockNS' + step.name = 'MockStep(%s)' % (id(step), ) + return step + + +def mock_event_dispatcher(): + evd = Mock(name='event_dispatcher') + evd.groups = ['worker'] + evd._outbound_buffer = deque() + return evd + + +class PlaceHolder(object): + pass + + +def find_step(obj, typ): + return obj.blueprint.steps[typ.name] + + +class Consumer(__Consumer): + + def __init__(self, *args, **kwargs): + kwargs.setdefault('without_mingle', True) # disable Mingle step + kwargs.setdefault('without_gossip', True) # disable Gossip step + kwargs.setdefault('without_heartbeat', True) # disable Heart step + super(Consumer, self).__init__(*args, **kwargs) + + +class _MyKombuConsumer(Consumer): + broadcast_consumer = Mock() + task_consumer = Mock() + + def __init__(self, *args, **kwargs): + kwargs.setdefault('pool', BasePool(2)) + super(_MyKombuConsumer, self).__init__(*args, **kwargs) + + def restart_heartbeat(self): + self.heart = None + + +class MyKombuConsumer(Consumer): + + def loop(self, *args, **kwargs): + pass + + +class MockNode(object): + commands = [] + + def handle_message(self, body, message): + self.commands.append(body.pop('command', None)) + + +class MockEventDispatcher(object): + sent = [] + closed = False + flushed = False + _outbound_buffer = [] + + def send(self, event, *args, **kwargs): + self.sent.append(event) + + def close(self): + self.closed = True + + def flush(self): + self.flushed = True + + +class MockHeart(object): + closed = False + + def stop(self): + self.closed = True + + +def create_message(channel, **data): + data.setdefault('id', uuid()) + channel.no_ack_consumers = set() + m = Message(channel, body=pickle.dumps(dict(**data)), + content_type='application/x-python-serialize', + content_encoding='binary', + delivery_info={'consumer_tag': 'mock'}) + m.accept = ['application/x-python-serialize'] + return m + + +class test_Consumer(AppCase): + + def setup(self): + self.buffer = FastQueue() + self.timer = Timer() + + @self.app.task(shared=False) + def foo_task(x, y, z): + return x * y * z + self.foo_task = foo_task + + def teardown(self): + self.timer.stop() + + def test_info(self): + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.task_consumer = Mock() + l.qos = QoS(l.task_consumer.qos, 10) + l.connection = Mock() + l.connection.info.return_value = {'foo': 'bar'} + l.controller = l.app.WorkController() + l.controller.pool = Mock() + l.controller.pool.info.return_value = [Mock(), Mock()] + l.controller.consumer = l + info = l.controller.stats() + self.assertEqual(info['prefetch_count'], 10) + self.assertTrue(info['broker']) + + def test_start_when_closed(self): + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = CLOSE + l.start() + + def test_connection(self): + l = 
MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + + l.blueprint.start(l) + self.assertIsInstance(l.connection, Connection) + + l.blueprint.state = RUN + l.event_dispatcher = None + l.blueprint.restart(l) + self.assertTrue(l.connection) + + l.blueprint.state = RUN + l.shutdown() + self.assertIsNone(l.connection) + self.assertIsNone(l.task_consumer) + + l.blueprint.start(l) + self.assertIsInstance(l.connection, Connection) + l.blueprint.restart(l) + + l.stop() + l.shutdown() + self.assertIsNone(l.connection) + self.assertIsNone(l.task_consumer) + + def test_close_connection(self): + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN + step = find_step(l, consumer.Connection) + conn = l.connection = Mock() + step.shutdown(l) + self.assertTrue(conn.close.called) + self.assertIsNone(l.connection) + + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + eventer = l.event_dispatcher = mock_event_dispatcher() + eventer.enabled = True + heart = l.heart = MockHeart() + l.blueprint.state = RUN + Events = find_step(l, consumer.Events) + Events.shutdown(l) + Heart = find_step(l, consumer.Heart) + Heart.shutdown(l) + self.assertTrue(eventer.close.call_count) + self.assertTrue(heart.closed) + + @patch('celery.worker.consumer.warn') + def test_receive_message_unknown(self, warn): + l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN + l.steps.pop() + backend = Mock() + m = create_message(backend, unknown={'baz': '!!!'}) + l.event_dispatcher = mock_event_dispatcher() + l.node = MockNode() + + callback = self._get_on_message(l) + callback(m.decode(), m) + self.assertTrue(warn.call_count) + + @patch('celery.worker.strategy.to_timestamp') + def test_receive_message_eta_OverflowError(self, to_timestamp): + to_timestamp.side_effect = OverflowError() + l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN + l.steps.pop() + m = create_message(Mock(), task=self.foo_task.name, + args=('2, 2'), + kwargs={}, + eta=datetime.now().isoformat()) + l.event_dispatcher = mock_event_dispatcher() + l.node = MockNode() + l.update_strategies() + l.qos = Mock() + + callback = self._get_on_message(l) + callback(m.decode(), m) + self.assertTrue(m.acknowledged) + + @patch('celery.worker.consumer.error') + def test_receive_message_InvalidTaskError(self, error): + l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN + l.event_dispatcher = mock_event_dispatcher() + l.steps.pop() + m = create_message(Mock(), task=self.foo_task.name, + args=(1, 2), kwargs='foobarbaz', id=1) + l.update_strategies() + l.event_dispatcher = mock_event_dispatcher() + + callback = self._get_on_message(l) + callback(m.decode(), m) + self.assertIn('Received invalid task message', error.call_args[0][0]) + + @patch('celery.worker.consumer.crit') + def test_on_decode_error(self, crit): + l = Consumer(self.buffer.put, timer=self.timer, app=self.app) + + class MockMessage(Mock): + content_type = 'application/x-msgpack' + content_encoding = 'binary' + body = 'foobarbaz' + + message = MockMessage() + l.on_decode_error(message, KeyError('foo')) + self.assertTrue(message.ack.call_count) + self.assertIn("Can't decode message body", crit.call_args[0][0]) + + def _get_on_message(self, l): + if l.qos is None: + l.qos = Mock() + l.event_dispatcher = mock_event_dispatcher() + l.task_consumer = Mock() + l.connection = Mock() + l.connection.drain_events.side_effect = 
WorkerShutdown() + + with self.assertRaises(WorkerShutdown): + l.loop(*l.loop_args()) + self.assertTrue(l.task_consumer.register_callback.called) + return l.task_consumer.register_callback.call_args[0][0] + + def test_receieve_message(self): + l = Consumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN + l.event_dispatcher = mock_event_dispatcher() + m = create_message(Mock(), task=self.foo_task.name, + args=[2, 4, 8], kwargs={}) + l.update_strategies() + callback = self._get_on_message(l) + callback(m.decode(), m) + + in_bucket = self.buffer.get_nowait() + self.assertIsInstance(in_bucket, Request) + self.assertEqual(in_bucket.name, self.foo_task.name) + self.assertEqual(in_bucket.execute(), 2 * 4 * 8) + self.assertTrue(self.timer.empty()) + + def test_start_channel_error(self): + + class MockConsumer(Consumer): + iterations = 0 + + def loop(self, *args, **kwargs): + if not self.iterations: + self.iterations = 1 + raise KeyError('foo') + raise SyntaxError('bar') + + l = MockConsumer(self.buffer.put, timer=self.timer, + send_events=False, pool=BasePool(), app=self.app) + l.channel_errors = (KeyError, ) + with self.assertRaises(KeyError): + l.start() + l.timer.stop() + + def test_start_connection_error(self): + + class MockConsumer(Consumer): + iterations = 0 + + def loop(self, *args, **kwargs): + if not self.iterations: + self.iterations = 1 + raise KeyError('foo') + raise SyntaxError('bar') + + l = MockConsumer(self.buffer.put, timer=self.timer, + send_events=False, pool=BasePool(), app=self.app) + + l.connection_errors = (KeyError, ) + self.assertRaises(SyntaxError, l.start) + l.timer.stop() + + def test_loop_ignores_socket_timeout(self): + + class Connection(self.app.connection().__class__): + obj = None + + def drain_events(self, **kwargs): + self.obj.connection = None + raise socket.timeout(10) + + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.connection = Connection() + l.task_consumer = Mock() + l.connection.obj = l + l.qos = QoS(l.task_consumer.qos, 10) + l.loop(*l.loop_args()) + + def test_loop_when_socket_error(self): + + class Connection(self.app.connection().__class__): + obj = None + + def drain_events(self, **kwargs): + self.obj.connection = None + raise socket.error('foo') + + l = Consumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN + c = l.connection = Connection() + l.connection.obj = l + l.task_consumer = Mock() + l.qos = QoS(l.task_consumer.qos, 10) + with self.assertRaises(socket.error): + l.loop(*l.loop_args()) + + l.blueprint.state = CLOSE + l.connection = c + l.loop(*l.loop_args()) + + def test_loop(self): + + class Connection(self.app.connection().__class__): + obj = None + + def drain_events(self, **kwargs): + self.obj.connection = None + + l = Consumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN + l.connection = Connection() + l.connection.obj = l + l.task_consumer = Mock() + l.qos = QoS(l.task_consumer.qos, 10) + + l.loop(*l.loop_args()) + l.loop(*l.loop_args()) + self.assertTrue(l.task_consumer.consume.call_count) + l.task_consumer.qos.assert_called_with(prefetch_count=10) + self.assertEqual(l.qos.value, 10) + l.qos.decrement_eventually() + self.assertEqual(l.qos.value, 9) + l.qos.update() + self.assertEqual(l.qos.value, 9) + l.task_consumer.qos.assert_called_with(prefetch_count=9) + + def test_ignore_errors(self): + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.connection_errors = (AttributeError, KeyError, ) + 
l.channel_errors = (SyntaxError, ) + ignore_errors(l, Mock(side_effect=AttributeError('foo'))) + ignore_errors(l, Mock(side_effect=KeyError('foo'))) + ignore_errors(l, Mock(side_effect=SyntaxError('foo'))) + with self.assertRaises(IndexError): + ignore_errors(l, Mock(side_effect=IndexError('foo'))) + + def test_apply_eta_task(self): + from celery.worker import state + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.qos = QoS(None, 10) + + task = object() + qos = l.qos.value + l.apply_eta_task(task) + self.assertIn(task, state.reserved_requests) + self.assertEqual(l.qos.value, qos - 1) + self.assertIs(self.buffer.get_nowait(), task) + + def test_receieve_message_eta_isoformat(self): + l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN + l.steps.pop() + m = create_message( + Mock(), task=self.foo_task.name, + eta=(datetime.now() + timedelta(days=1)).isoformat(), + args=[2, 4, 8], kwargs={}, + ) + + l.task_consumer = Mock() + l.qos = QoS(l.task_consumer.qos, 1) + current_pcount = l.qos.value + l.event_dispatcher = mock_event_dispatcher() + l.enabled = False + l.update_strategies() + callback = self._get_on_message(l) + callback(m.decode(), m) + l.timer.stop() + l.timer.join(1) + + items = [entry[2] for entry in self.timer.queue] + found = 0 + for item in items: + if item.args[0].name == self.foo_task.name: + found = True + self.assertTrue(found) + self.assertGreater(l.qos.value, current_pcount) + l.timer.stop() + + def test_pidbox_callback(self): + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + con = find_step(l, consumer.Control).box + con.node = Mock() + con.reset = Mock() + + con.on_message('foo', 'bar') + con.node.handle_message.assert_called_with('foo', 'bar') + + con.node = Mock() + con.node.handle_message.side_effect = KeyError('foo') + con.on_message('foo', 'bar') + con.node.handle_message.assert_called_with('foo', 'bar') + + con.node = Mock() + con.node.handle_message.side_effect = ValueError('foo') + con.on_message('foo', 'bar') + con.node.handle_message.assert_called_with('foo', 'bar') + self.assertTrue(con.reset.called) + + def test_revoke(self): + l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN + l.steps.pop() + backend = Mock() + id = uuid() + t = create_message(backend, task=self.foo_task.name, args=[2, 4, 8], + kwargs={}, id=id) + from celery.worker.state import revoked + revoked.add(id) + + callback = self._get_on_message(l) + callback(t.decode(), t) + self.assertTrue(self.buffer.empty()) + + def test_receieve_message_not_registered(self): + l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN + l.steps.pop() + backend = Mock() + m = create_message(backend, task='x.X.31x', args=[2, 4, 8], kwargs={}) + + l.event_dispatcher = mock_event_dispatcher() + callback = self._get_on_message(l) + self.assertFalse(callback(m.decode(), m)) + with self.assertRaises(Empty): + self.buffer.get_nowait() + self.assertTrue(self.timer.empty()) + + @patch('celery.worker.consumer.warn') + @patch('celery.worker.consumer.logger') + def test_receieve_message_ack_raises(self, logger, warn): + l = Consumer(self.buffer.put, timer=self.timer, app=self.app) + l.blueprint.state = RUN + backend = Mock() + m = create_message(backend, args=[2, 4, 8], kwargs={}) + + l.event_dispatcher = mock_event_dispatcher() + l.connection_errors = (socket.error, ) + m.reject = Mock() + m.reject.side_effect = socket.error('foo') + callback = 
self._get_on_message(l) + self.assertFalse(callback(m.decode(), m)) + self.assertTrue(warn.call_count) + with self.assertRaises(Empty): + self.buffer.get_nowait() + self.assertTrue(self.timer.empty()) + m.reject.assert_called_with(requeue=False) + self.assertTrue(logger.critical.call_count) + + def test_receive_message_eta(self): + import sys + from functools import partial + if os.environ.get('C_DEBUG_TEST'): + pp = partial(print, file=sys.__stderr__) + else: + def pp(*args, **kwargs): + pass + pp('TEST RECEIVE MESSAGE ETA') + pp('+CREATE MYKOMBUCONSUMER') + l = _MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + pp('-CREATE MYKOMBUCONSUMER') + l.steps.pop() + l.event_dispatcher = mock_event_dispatcher() + backend = Mock() + pp('+ CREATE MESSAGE') + m = create_message( + backend, task=self.foo_task.name, + args=[2, 4, 8], kwargs={}, + eta=(datetime.now() + timedelta(days=1)).isoformat(), + ) + pp('- CREATE MESSAGE') + + try: + pp('+ BLUEPRINT START 1') + l.blueprint.start(l) + pp('- BLUEPRINT START 1') + p = l.app.conf.BROKER_CONNECTION_RETRY + l.app.conf.BROKER_CONNECTION_RETRY = False + pp('+ BLUEPRINT START 2') + l.blueprint.start(l) + pp('- BLUEPRINT START 2') + l.app.conf.BROKER_CONNECTION_RETRY = p + pp('+ BLUEPRINT RESTART') + l.blueprint.restart(l) + pp('- BLUEPRINT RESTART') + l.event_dispatcher = mock_event_dispatcher() + pp('+ GET ON MESSAGE') + callback = self._get_on_message(l) + pp('- GET ON MESSAGE') + pp('+ CALLBACK') + callback(m.decode(), m) + pp('- CALLBACK') + finally: + pp('+ STOP TIMER') + l.timer.stop() + pp('- STOP TIMER') + try: + pp('+ JOIN TIMER') + l.timer.join() + pp('- JOIN TIMER') + except RuntimeError: + pass + + in_hold = l.timer.queue[0] + self.assertEqual(len(in_hold), 3) + eta, priority, entry = in_hold + task = entry.args[0] + self.assertIsInstance(task, Request) + self.assertEqual(task.name, self.foo_task.name) + self.assertEqual(task.execute(), 2 * 4 * 8) + with self.assertRaises(Empty): + self.buffer.get_nowait() + + def test_reset_pidbox_node(self): + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + con = find_step(l, consumer.Control).box + con.node = Mock() + chan = con.node.channel = Mock() + l.connection = Mock() + chan.close.side_effect = socket.error('foo') + l.connection_errors = (socket.error, ) + con.reset() + chan.close.assert_called_with() + + def test_reset_pidbox_node_green(self): + from celery.worker.pidbox import gPidbox + pool = Mock() + pool.is_green = True + l = MyKombuConsumer(self.buffer.put, timer=self.timer, pool=pool, + app=self.app) + con = find_step(l, consumer.Control) + self.assertIsInstance(con.box, gPidbox) + con.start(l) + l.pool.spawn_n.assert_called_with( + con.box.loop, l, + ) + + def test__green_pidbox_node(self): + pool = Mock() + pool.is_green = True + l = MyKombuConsumer(self.buffer.put, timer=self.timer, pool=pool, + app=self.app) + l.node = Mock() + controller = find_step(l, consumer.Control) + + class BConsumer(Mock): + + def __enter__(self): + self.consume() + return self + + def __exit__(self, *exc_info): + self.cancel() + + controller.box.node.listen = BConsumer() + connections = [] + + class Connection(object): + calls = 0 + + def __init__(self, obj): + connections.append(self) + self.obj = obj + self.default_channel = self.channel() + self.closed = False + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + + def channel(self): + return Mock() + + def as_uri(self): + return 'dummy://' + + def drain_events(self, **kwargs): + if not 
self.calls: + self.calls += 1 + raise socket.timeout() + self.obj.connection = None + controller.box._node_shutdown.set() + + def close(self): + self.closed = True + + l.connection = Mock() + l.connect = lambda: Connection(obj=l) + controller = find_step(l, consumer.Control) + controller.box.loop(l) + + self.assertTrue(controller.box.node.listen.called) + self.assertTrue(controller.box.consumer) + controller.box.consumer.consume.assert_called_with() + + self.assertIsNone(l.connection) + self.assertTrue(connections[0].closed) + + @patch('kombu.connection.Connection._establish_connection') + @patch('kombu.utils.sleep') + def test_connect_errback(self, sleep, connect): + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + from kombu.transport.memory import Transport + Transport.connection_errors = (ChannelError, ) + + def effect(): + if connect.call_count > 1: + return + raise ChannelError('error') + connect.side_effect = effect + l.connect() + connect.assert_called_with() + + def test_stop_pidbox_node(self): + l = MyKombuConsumer(self.buffer.put, timer=self.timer, app=self.app) + cont = find_step(l, consumer.Control) + cont._node_stopped = Event() + cont._node_shutdown = Event() + cont._node_stopped.set() + cont.stop(l) + + def test_start__loop(self): + + class _QoS(object): + prev = 3 + value = 4 + + def update(self): + self.prev = self.value + + class _Consumer(MyKombuConsumer): + iterations = 0 + + def reset_connection(self): + if self.iterations >= 1: + raise KeyError('foo') + + init_callback = Mock() + l = _Consumer(self.buffer.put, timer=self.timer, + init_callback=init_callback, app=self.app) + l.task_consumer = Mock() + l.broadcast_consumer = Mock() + l.qos = _QoS() + l.connection = Connection() + l.iterations = 0 + + def raises_KeyError(*args, **kwargs): + l.iterations += 1 + if l.qos.prev != l.qos.value: + l.qos.update() + if l.iterations >= 2: + raise KeyError('foo') + + l.loop = raises_KeyError + with self.assertRaises(KeyError): + l.start() + self.assertEqual(l.iterations, 2) + self.assertEqual(l.qos.prev, l.qos.value) + + init_callback.reset_mock() + l = _Consumer(self.buffer.put, timer=self.timer, app=self.app, + send_events=False, init_callback=init_callback) + l.qos = _QoS() + l.task_consumer = Mock() + l.broadcast_consumer = Mock() + l.connection = Connection() + l.loop = Mock(side_effect=socket.error('foo')) + with self.assertRaises(socket.error): + l.start() + self.assertTrue(l.loop.call_count) + + def test_reset_connection_with_no_node(self): + l = Consumer(self.buffer.put, timer=self.timer, app=self.app) + l.steps.pop() + self.assertEqual(None, l.pool) + l.blueprint.start(l) + + +class test_WorkController(AppCase): + + def setup(self): + self.worker = self.create_worker() + from celery import worker + self._logger = worker.logger + self._comp_logger = components.logger + self.logger = worker.logger = Mock() + self.comp_logger = components.logger = Mock() + + @self.app.task(shared=False) + def foo_task(x, y, z): + return x * y * z + self.foo_task = foo_task + + def teardown(self): + from celery import worker + worker.logger = self._logger + components.logger = self._comp_logger + + def create_worker(self, **kw): + worker = self.app.WorkController(concurrency=1, loglevel=0, **kw) + worker.blueprint.shutdown_complete.set() + return worker + + def test_on_consumer_ready(self): + self.worker.on_consumer_ready(Mock()) + + def test_setup_queues_worker_direct(self): + self.app.conf.CELERY_WORKER_DIRECT = True + self.app.amqp.__dict__['queues'] = Mock() + 
self.worker.setup_queues({}) + self.app.amqp.queues.select_add.assert_called_with( + worker_direct(self.worker.hostname), + ) + + def test_send_worker_shutdown(self): + with patch('celery.signals.worker_shutdown') as ws: + self.worker._send_worker_shutdown() + ws.send.assert_called_with(sender=self.worker) + + def test_process_shutdown_on_worker_shutdown(self): + raise SkipTest('unstable test') + from celery.concurrency.prefork import process_destructor + from celery.concurrency.asynpool import Worker + with patch('celery.signals.worker_process_shutdown') as ws: + Worker._make_shortcuts = Mock() + with patch('os._exit') as _exit: + worker = Worker(None, None, on_exit=process_destructor) + worker._do_exit(22, 3.1415926) + ws.send.assert_called_with( + sender=None, pid=22, exitcode=3.1415926, + ) + _exit.assert_called_with(3.1415926) + + def test_process_task_revoked_release_semaphore(self): + self.worker._quick_release = Mock() + req = Mock() + req.execute_using_pool.side_effect = TaskRevokedError + self.worker._process_task(req) + self.worker._quick_release.assert_called_with() + + delattr(self.worker, '_quick_release') + self.worker._process_task(req) + + def test_shutdown_no_blueprint(self): + self.worker.blueprint = None + self.worker._shutdown() + + @patch('celery.platforms.create_pidlock') + def test_use_pidfile(self, create_pidlock): + create_pidlock.return_value = Mock() + worker = self.create_worker(pidfile='pidfilelockfilepid') + worker.steps = [] + worker.start() + self.assertTrue(create_pidlock.called) + worker.stop() + self.assertTrue(worker.pidlock.release.called) + + @patch('celery.platforms.signals') + @patch('celery.platforms.set_mp_process_title') + def test_process_initializer(self, set_mp_process_title, _signals): + with restore_logging(): + from celery import signals + from celery._state import _tls + from celery.concurrency.prefork import ( + process_initializer, WORKER_SIGRESET, WORKER_SIGIGNORE, + ) + + def on_worker_process_init(**kwargs): + on_worker_process_init.called = True + on_worker_process_init.called = False + signals.worker_process_init.connect(on_worker_process_init) + + def Loader(*args, **kwargs): + loader = Mock(*args, **kwargs) + loader.conf = {} + loader.override_backends = {} + return loader + + with self.Celery(loader=Loader) as app: + app.conf = AttributeDict(DEFAULTS) + process_initializer(app, 'awesome.worker.com') + _signals.ignore.assert_any_call(*WORKER_SIGIGNORE) + _signals.reset.assert_any_call(*WORKER_SIGRESET) + self.assertTrue(app.loader.init_worker.call_count) + self.assertTrue(on_worker_process_init.called) + self.assertIs(_tls.current_app, app) + set_mp_process_title.assert_called_with( + 'celeryd', hostname='awesome.worker.com', + ) + + with patch('celery.app.trace.setup_worker_optimizations') as S: + os.environ['FORKED_BY_MULTIPROCESSING'] = "1" + try: + process_initializer(app, 'luke.worker.com') + S.assert_called_with(app) + finally: + os.environ.pop('FORKED_BY_MULTIPROCESSING', None) + + def test_attrs(self): + worker = self.worker + self.assertIsNotNone(worker.timer) + self.assertIsInstance(worker.timer, Timer) + self.assertIsNotNone(worker.pool) + self.assertIsNotNone(worker.consumer) + self.assertTrue(worker.steps) + + def test_with_embedded_beat(self): + worker = self.app.WorkController(concurrency=1, loglevel=0, beat=True) + self.assertTrue(worker.beat) + self.assertIn(worker.beat, [w.obj for w in worker.steps]) + + def test_with_autoscaler(self): + worker = self.create_worker( + autoscale=[10, 3], send_events=False, + 
timer_cls='celery.utils.timer2.Timer', + ) + self.assertTrue(worker.autoscaler) + + def test_dont_stop_or_terminate(self): + worker = self.app.WorkController(concurrency=1, loglevel=0) + worker.stop() + self.assertNotEqual(worker.blueprint.state, CLOSE) + worker.terminate() + self.assertNotEqual(worker.blueprint.state, CLOSE) + + sigsafe, worker.pool.signal_safe = worker.pool.signal_safe, False + try: + worker.blueprint.state = RUN + worker.stop(in_sighandler=True) + self.assertNotEqual(worker.blueprint.state, CLOSE) + worker.terminate(in_sighandler=True) + self.assertNotEqual(worker.blueprint.state, CLOSE) + finally: + worker.pool.signal_safe = sigsafe + + def test_on_timer_error(self): + worker = self.app.WorkController(concurrency=1, loglevel=0) + + try: + raise KeyError('foo') + except KeyError as exc: + components.Timer(worker).on_timer_error(exc) + msg, args = self.comp_logger.error.call_args[0] + self.assertIn('KeyError', msg % args) + + def test_on_timer_tick(self): + worker = self.app.WorkController(concurrency=1, loglevel=10) + + components.Timer(worker).on_timer_tick(30.0) + xargs = self.comp_logger.debug.call_args[0] + fmt, arg = xargs[0], xargs[1] + self.assertEqual(30.0, arg) + self.assertIn('Next eta %s secs', fmt) + + def test_process_task(self): + worker = self.worker + worker.pool = Mock() + backend = Mock() + m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10], + kwargs={}) + task = Request(m.decode(), message=m, app=self.app) + worker._process_task(task) + self.assertEqual(worker.pool.apply_async.call_count, 1) + worker.pool.stop() + + def test_process_task_raise_base(self): + worker = self.worker + worker.pool = Mock() + worker.pool.apply_async.side_effect = KeyboardInterrupt('Ctrl+C') + backend = Mock() + m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10], + kwargs={}) + task = Request(m.decode(), message=m, app=self.app) + worker.steps = [] + worker.blueprint.state = RUN + with self.assertRaises(KeyboardInterrupt): + worker._process_task(task) + + def test_process_task_raise_WorkerTerminate(self): + worker = self.worker + worker.pool = Mock() + worker.pool.apply_async.side_effect = WorkerTerminate() + backend = Mock() + m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10], + kwargs={}) + task = Request(m.decode(), message=m, app=self.app) + worker.steps = [] + worker.blueprint.state = RUN + with self.assertRaises(SystemExit): + worker._process_task(task) + + def test_process_task_raise_regular(self): + worker = self.worker + worker.pool = Mock() + worker.pool.apply_async.side_effect = KeyError('some exception') + backend = Mock() + m = create_message(backend, task=self.foo_task.name, args=[4, 8, 10], + kwargs={}) + task = Request(m.decode(), message=m, app=self.app) + worker._process_task(task) + worker.pool.stop() + + def test_start_catches_base_exceptions(self): + worker1 = self.create_worker() + worker1.blueprint.state = RUN + stc = MockStep() + stc.start.side_effect = WorkerTerminate() + worker1.steps = [stc] + worker1.start() + stc.start.assert_called_with(worker1) + self.assertTrue(stc.terminate.call_count) + + worker2 = self.create_worker() + worker2.blueprint.state = RUN + sec = MockStep() + sec.start.side_effect = WorkerShutdown() + sec.terminate = None + worker2.steps = [sec] + worker2.start() + self.assertTrue(sec.stop.call_count) + + def test_state_db(self): + from celery.worker import state + Persistent = state.Persistent + + state.Persistent = Mock() + try: + worker = 
self.create_worker(state_db='statefilename') + self.assertTrue(worker._persistence) + finally: + state.Persistent = Persistent + + def test_process_task_sem(self): + worker = self.worker + worker._quick_acquire = Mock() + + req = Mock() + worker._process_task_sem(req) + worker._quick_acquire.assert_called_with(worker._process_task, req) + + def test_signal_consumer_close(self): + worker = self.worker + worker.consumer = Mock() + + worker.signal_consumer_close() + worker.consumer.close.assert_called_with() + + worker.consumer.close.side_effect = AttributeError() + worker.signal_consumer_close() + + def test_start__stop(self): + worker = self.worker + worker.blueprint.shutdown_complete.set() + worker.steps = [MockStep(StartStopStep(self)) for _ in range(4)] + worker.blueprint.state = RUN + worker.blueprint.started = 4 + for w in worker.steps: + w.start = Mock() + w.close = Mock() + w.stop = Mock() + + worker.start() + for w in worker.steps: + self.assertTrue(w.start.call_count) + worker.consumer = Mock() + worker.stop() + for stopstep in worker.steps: + self.assertTrue(stopstep.close.call_count) + self.assertTrue(stopstep.stop.call_count) + + # Doesn't close pool if no pool. + worker.start() + worker.pool = None + worker.stop() + + # test that stop of None is not attempted + worker.steps[-1] = None + worker.start() + worker.stop() + + def test_step_raises(self): + worker = self.worker + step = Mock() + worker.steps = [step] + step.start.side_effect = TypeError() + worker.stop = Mock() + worker.start() + worker.stop.assert_called_with() + + def test_state(self): + self.assertTrue(self.worker.state) + + def test_start__terminate(self): + worker = self.worker + worker.blueprint.shutdown_complete.set() + worker.blueprint.started = 5 + worker.blueprint.state = RUN + worker.steps = [MockStep() for _ in range(5)] + worker.start() + for w in worker.steps[:3]: + self.assertTrue(w.start.call_count) + self.assertTrue(worker.blueprint.started, len(worker.steps)) + self.assertEqual(worker.blueprint.state, RUN) + worker.terminate() + for step in worker.steps: + self.assertTrue(step.terminate.call_count) + + def test_Queues_pool_no_sem(self): + w = Mock() + w.pool_cls.uses_semaphore = False + components.Queues(w).create(w) + self.assertIs(w.process_task, w._process_task) + + def test_Hub_crate(self): + w = Mock() + x = components.Hub(w) + x.create(w) + self.assertTrue(w.timer.max_interval) + + def test_Pool_crate_threaded(self): + w = Mock() + w._conninfo.connection_errors = w._conninfo.channel_errors = () + w.pool_cls = Mock() + w.use_eventloop = False + pool = components.Pool(w) + pool.create(w) + + def test_Pool_create(self): + from kombu.async.semaphore import LaxBoundedSemaphore + w = Mock() + w._conninfo.connection_errors = w._conninfo.channel_errors = () + w.hub = Mock() + + PoolImp = Mock() + poolimp = PoolImp.return_value = Mock() + poolimp._pool = [Mock(), Mock()] + poolimp._cache = {} + poolimp._fileno_to_inq = {} + poolimp._fileno_to_outq = {} + + from celery.concurrency.prefork import TaskPool as _TaskPool + + class MockTaskPool(_TaskPool): + Pool = PoolImp + + @property + def timers(self): + return {Mock(): 30} + + w.pool_cls = MockTaskPool + w.use_eventloop = True + w.consumer.restart_count = -1 + pool = components.Pool(w) + pool.create(w) + pool.register_with_event_loop(w, w.hub) + self.assertIsInstance(w.semaphore, LaxBoundedSemaphore) + P = w.pool + P.start() diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/__init__.py 
b/thesisenv/lib/python3.6/site-packages/celery/utils/__init__.py new file mode 100644 index 0000000..20e11f0 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/utils/__init__.py @@ -0,0 +1,407 @@ +# -*- coding: utf-8 -*- +""" + celery.utils + ~~~~~~~~~~~~ + + Utility functions. + +""" +from __future__ import absolute_import, print_function + +import numbers +import os +import re +import socket +import sys +import traceback +import warnings +import datetime + +from collections import Callable +from functools import partial, wraps +from inspect import getargspec +from pprint import pprint + +from kombu.entity import Exchange, Queue + +from celery.exceptions import CPendingDeprecationWarning, CDeprecationWarning +from celery.five import WhateverIO, items, reraise, string_t + +__all__ = ['worker_direct', 'warn_deprecated', 'deprecated', 'lpmerge', + 'is_iterable', 'isatty', 'cry', 'maybe_reraise', 'strtobool', + 'jsonify', 'gen_task_name', 'nodename', 'nodesplit', + 'cached_property'] + +PY3 = sys.version_info[0] == 3 + + +PENDING_DEPRECATION_FMT = """ + {description} is scheduled for deprecation in \ + version {deprecation} and removal in version v{removal}. \ + {alternative} +""" + +DEPRECATION_FMT = """ + {description} is deprecated and scheduled for removal in + version {removal}. {alternative} +""" + +UNKNOWN_SIMPLE_FORMAT_KEY = """ +Unknown format %{0} in string {1!r}. +Possible causes: Did you forget to escape the expand sign (use '%%{0!r}'), +or did you escape and the value was expanded twice? (%%N -> %N -> %hostname)? +""".strip() + +#: Billiard sets this when execv is enabled. +#: We use it to find out the name of the original ``__main__`` +#: module, so that we can properly rewrite the name of the +#: task to be that of ``App.main``. +MP_MAIN_FILE = os.environ.get('MP_MAIN_FILE') or None + +#: Exchange for worker direct queues. +WORKER_DIRECT_EXCHANGE = Exchange('C.dq') + +#: Format for worker direct queue names. +WORKER_DIRECT_QUEUE_FORMAT = '{hostname}.dq' + +#: Separator for worker node name and hostname. +NODENAME_SEP = '@' + +NODENAME_DEFAULT = 'celery' +RE_FORMAT = re.compile(r'%(\w)') + + +def worker_direct(hostname): + """Return :class:`kombu.Queue` that is a direct route to + a worker by hostname. + + :param hostname: The fully qualified node name of a worker + (e.g. ``w1@example.com``). If passed a + :class:`kombu.Queue` instance it will simply return + that instead. + """ + if isinstance(hostname, Queue): + return hostname + return Queue(WORKER_DIRECT_QUEUE_FORMAT.format(hostname=hostname), + WORKER_DIRECT_EXCHANGE, + hostname, auto_delete=True) + + +def warn_deprecated(description=None, deprecation=None, + removal=None, alternative=None, stacklevel=2): + ctx = {'description': description, + 'deprecation': deprecation, 'removal': removal, + 'alternative': alternative} + if deprecation is not None: + w = CPendingDeprecationWarning(PENDING_DEPRECATION_FMT.format(**ctx)) + else: + w = CDeprecationWarning(DEPRECATION_FMT.format(**ctx)) + warnings.warn(w, stacklevel=stacklevel) + + +def deprecated(deprecation=None, removal=None, + alternative=None, description=None): + """Decorator for deprecated functions. + + A deprecation warning will be emitted when the function is called. + + :keyword deprecation: Version that marks first deprecation, if this + argument is not set a ``PendingDeprecationWarning`` will be emitted + instead. + :keyword removal: Future version when this feature will be removed. 
+ :keyword alternative: Instructions for an alternative solution (if any). + :keyword description: Description of what is being deprecated. + + """ + def _inner(fun): + + @wraps(fun) + def __inner(*args, **kwargs): + from .imports import qualname + warn_deprecated(description=description or qualname(fun), + deprecation=deprecation, + removal=removal, + alternative=alternative, + stacklevel=3) + return fun(*args, **kwargs) + return __inner + return _inner + + +def deprecated_property(deprecation=None, removal=None, + alternative=None, description=None): + def _inner(fun): + return _deprecated_property( + fun, deprecation=deprecation, removal=removal, + alternative=alternative, description=description or fun.__name__) + return _inner + + +class _deprecated_property(object): + + def __init__(self, fget=None, fset=None, fdel=None, doc=None, **depreinfo): + self.__get = fget + self.__set = fset + self.__del = fdel + self.__name__, self.__module__, self.__doc__ = ( + fget.__name__, fget.__module__, fget.__doc__, + ) + self.depreinfo = depreinfo + self.depreinfo.setdefault('stacklevel', 3) + + def __get__(self, obj, type=None): + if obj is None: + return self + warn_deprecated(**self.depreinfo) + return self.__get(obj) + + def __set__(self, obj, value): + if obj is None: + return self + if self.__set is None: + raise AttributeError('cannot set attribute') + warn_deprecated(**self.depreinfo) + self.__set(obj, value) + + def __delete__(self, obj): + if obj is None: + return self + if self.__del is None: + raise AttributeError('cannot delete attribute') + warn_deprecated(**self.depreinfo) + self.__del(obj) + + def setter(self, fset): + return self.__class__(self.__get, fset, self.__del, **self.depreinfo) + + def deleter(self, fdel): + return self.__class__(self.__get, self.__set, fdel, **self.depreinfo) + + +def lpmerge(L, R): + """In place left precedent dictionary merge. 
+ + Keeps values from `L`, if the value in `R` is :const:`None`.""" + set = L.__setitem__ + [set(k, v) for k, v in items(R) if v is not None] + return L + + +def is_iterable(obj): + try: + iter(obj) + except TypeError: + return False + return True + + +def fun_takes_kwargs(fun, kwlist=[]): + # deprecated + S = getattr(fun, 'argspec', getargspec(fun)) + if S.keywords is not None: + return kwlist + return [kw for kw in kwlist if kw in S.args] + + +def isatty(fh): + try: + return fh.isatty() + except AttributeError: + pass + + +def cry(out=None, sepchr='=', seplen=49): # pragma: no cover + """Return stacktrace of all active threads, + taken from https://gist.github.com/737056.""" + import threading + + out = WhateverIO() if out is None else out + P = partial(print, file=out) + + # get a map of threads by their ID so we can print their names + # during the traceback dump + tmap = dict((t.ident, t) for t in threading.enumerate()) + + sep = sepchr * seplen + for tid, frame in items(sys._current_frames()): + thread = tmap.get(tid) + if not thread: + # skip old junk (left-overs from a fork) + continue + P('{0.name}'.format(thread)) + P(sep) + traceback.print_stack(frame, file=out) + P(sep) + P('LOCAL VARIABLES') + P(sep) + pprint(frame.f_locals, stream=out) + P('\n') + return out.getvalue() + + +def maybe_reraise(): + """Re-raise if an exception is currently being handled, or return + otherwise.""" + exc_info = sys.exc_info() + try: + if exc_info[2]: + reraise(exc_info[0], exc_info[1], exc_info[2]) + finally: + # see http://docs.python.org/library/sys.html#sys.exc_info + del(exc_info) + + +def strtobool(term, table={'false': False, 'no': False, '0': False, + 'true': True, 'yes': True, '1': True, + 'on': True, 'off': False}): + """Convert common terms for true/false to bool + (true/false/yes/no/on/off/1/0).""" + if isinstance(term, string_t): + try: + return table[term.lower()] + except KeyError: + raise TypeError('Cannot coerce {0!r} to type bool'.format(term)) + return term + + +def jsonify(obj, + builtin_types=(numbers.Real, string_t), key=None, + keyfilter=None, + unknown_type_filter=None): + """Transforms object making it suitable for json serialization""" + from kombu.abstract import Object as KombuDictType + _jsonify = partial(jsonify, builtin_types=builtin_types, key=key, + keyfilter=keyfilter, + unknown_type_filter=unknown_type_filter) + + if isinstance(obj, KombuDictType): + obj = obj.as_dict(recurse=True) + + if obj is None or isinstance(obj, builtin_types): + return obj + elif isinstance(obj, (tuple, list)): + return [_jsonify(v) for v in obj] + elif isinstance(obj, dict): + return dict((k, _jsonify(v, key=k)) + for k, v in items(obj) + if (keyfilter(k) if keyfilter else 1)) + elif isinstance(obj, datetime.datetime): + # See "Date Time String Format" in the ECMA-262 specification. 
+ r = obj.isoformat() + if obj.microsecond: + r = r[:23] + r[26:] + if r.endswith('+00:00'): + r = r[:-6] + 'Z' + return r + elif isinstance(obj, datetime.date): + return obj.isoformat() + elif isinstance(obj, datetime.time): + r = obj.isoformat() + if obj.microsecond: + r = r[:12] + return r + elif isinstance(obj, datetime.timedelta): + return str(obj) + else: + if unknown_type_filter is None: + raise ValueError( + 'Unsupported type: {0!r} {1!r} (parent: {2})'.format( + type(obj), obj, key)) + return unknown_type_filter(obj) + + +def gen_task_name(app, name, module_name): + """Generate task name from name/module pair.""" + try: + module = sys.modules[module_name] + except KeyError: + # Fix for manage.py shell_plus (Issue #366) + module = None + + if module is not None: + module_name = module.__name__ + # - If the task module is used as the __main__ script + # - we need to rewrite the module part of the task name + # - to match App.main. + if MP_MAIN_FILE and module.__file__ == MP_MAIN_FILE: + # - see comment about :envvar:`MP_MAIN_FILE` above. + module_name = '__main__' + if module_name == '__main__' and app.main: + return '.'.join([app.main, name]) + return '.'.join(p for p in (module_name, name) if p) + + +def nodename(name, hostname): + """Create node name from name/hostname pair.""" + return NODENAME_SEP.join((name, hostname)) + + +def anon_nodename(hostname=None, prefix='gen'): + return nodename(''.join([prefix, str(os.getpid())]), + hostname or socket.gethostname()) + + +def nodesplit(nodename): + """Split node name into tuple of name/hostname.""" + parts = nodename.split(NODENAME_SEP, 1) + if len(parts) == 1: + return None, parts[0] + return parts + + +def default_nodename(hostname): + name, host = nodesplit(hostname or '') + return nodename(name or NODENAME_DEFAULT, host or socket.gethostname()) + + +def node_format(s, nodename, **extra): + name, host = nodesplit(nodename) + return host_format( + s, host, n=name or NODENAME_DEFAULT, **extra) + + +def _fmt_process_index(prefix='', default='0'): + from .log import current_process_index + index = current_process_index() + return '{0}{1}'.format(prefix, index) if index else default +_fmt_process_index_with_prefix = partial(_fmt_process_index, '-', '') + + +def host_format(s, host=None, **extra): + host = host or socket.gethostname() + name, _, domain = host.partition('.') + keys = dict({ + 'h': host, 'n': name, 'd': domain, + 'i': _fmt_process_index, 'I': _fmt_process_index_with_prefix, + }, **extra) + return simple_format(s, keys) + + +def simple_format(s, keys, pattern=RE_FORMAT, expand=r'\1'): + if s: + keys.setdefault('%', '%') + + def resolve(match): + key = match.expand(expand) + try: + resolver = keys[key] + except KeyError: + raise ValueError(UNKNOWN_SIMPLE_FORMAT_KEY.format(key, s)) + if isinstance(resolver, Callable): + return resolver() + return resolver + + return pattern.sub(resolve, s) + return s + + +# ------------------------------------------------------------------------ # +# > XXX Compat +from .log import LOG_LEVELS # noqa +from .imports import ( # noqa + qualname as get_full_cls_name, symbol_by_name as get_cls_by_name, + instantiate, import_from_cwd +) +from .functional import chunks, noop # noqa +from kombu.utils import cached_property, kwdict, uuid # noqa +gen_unique_id = uuid diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/compat.py b/thesisenv/lib/python3.6/site-packages/celery/utils/compat.py new file mode 100644 index 0000000..6f62964 --- /dev/null +++ 
b/thesisenv/lib/python3.6/site-packages/celery/utils/compat.py @@ -0,0 +1 @@ +from celery.five import * # noqa diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/debug.py b/thesisenv/lib/python3.6/site-packages/celery/utils/debug.py new file mode 100644 index 0000000..09c6ec8 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/utils/debug.py @@ -0,0 +1,167 @@ +# -*- coding: utf-8 -*- +""" + celery.utils.debug + ~~~~~~~~~~~~~~~~~~ + + Utilities for debugging memory usage. + +""" +from __future__ import absolute_import, print_function, unicode_literals + +import os + +from contextlib import contextmanager +from functools import partial + +from celery.five import range +from celery.platforms import signals + +try: + from psutil import Process +except ImportError: + Process = None # noqa + +__all__ = [ + 'blockdetection', 'sample_mem', 'memdump', 'sample', + 'humanbytes', 'mem_rss', 'ps', +] + +UNITS = ( + (2 ** 40.0, 'TB'), + (2 ** 30.0, 'GB'), + (2 ** 20.0, 'MB'), + (2 ** 10.0, 'kB'), + (0.0, '{0!d}b'), +) + +_process = None +_mem_sample = [] + + +def _on_blocking(signum, frame): + import inspect + raise RuntimeError( + 'Blocking detection timed-out at: {0}'.format( + inspect.getframeinfo(frame) + ) + ) + + +@contextmanager +def blockdetection(timeout): + """A timeout context using ``SIGALRM`` that can be used to detect blocking + functions.""" + if not timeout: + yield + else: + old_handler = signals['ALRM'] + old_handler = None if old_handler == _on_blocking else old_handler + + signals['ALRM'] = _on_blocking + + try: + yield signals.arm_alarm(timeout) + finally: + if old_handler: + signals['ALRM'] = old_handler + signals.reset_alarm() + + +def sample_mem(): + """Sample RSS memory usage. + + Statistics can then be output by calling :func:`memdump`. + + """ + current_rss = mem_rss() + _mem_sample.append(current_rss) + return current_rss + + +def _memdump(samples=10): + S = _mem_sample + prev = list(S) if len(S) <= samples else sample(S, samples) + _mem_sample[:] = [] + import gc + gc.collect() + after_collect = mem_rss() + return prev, after_collect + + +def memdump(samples=10, file=None): + """Dump memory statistics. + + Will print a sample of all RSS memory samples added by + calling :func:`sample_mem`, and in addition print + used RSS memory after :func:`gc.collect`. + + """ + say = partial(print, file=file) + if ps() is None: + say('- rss: (psutil not installed).') + return + prev, after_collect = _memdump(samples) + if prev: + say('- rss (sample):') + for mem in prev: + say('- > {0},'.format(mem)) + say('- rss (end): {0}.'.format(after_collect)) + + +def sample(x, n, k=0): + """Given a list `x` a sample of length ``n`` of that list is returned. + + E.g. if `n` is 10, and `x` has 100 items, a list of every 10th + item is returned. + + ``k`` can be used as offset. + + """ + j = len(x) // n + for _ in range(n): + try: + yield x[k] + except IndexError: + break + k += j + + +def hfloat(f, p=5): + """Convert float to value suitable for humans. + + :keyword p: Float precision. + + """ + i = int(f) + return i if i == f else '{0:.{p}}'.format(f, p=p) + + +def humanbytes(s): + """Convert bytes to human-readable form (e.g. 
kB, MB).""" + return next( + '{0}{1}'.format(hfloat(s / div if div else s), unit) + for div, unit in UNITS if s >= div + ) + + +def mem_rss(): + """Return RSS memory usage as a humanized string.""" + p = ps() + if p is not None: + return humanbytes(_process_memory_info(p).rss) + + +def ps(): + """Return the global :class:`psutil.Process` instance, + or :const:`None` if :mod:`psutil` is not installed.""" + global _process + if _process is None and Process is not None: + _process = Process(os.getpid()) + return _process + + +def _process_memory_info(process): + try: + return process.memory_info() + except AttributeError: + return process.get_memory_info() diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/__init__.py new file mode 100644 index 0000000..b6e8d0b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +from .signal import Signal + +__all__ = ['Signal'] diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/saferef.py b/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/saferef.py new file mode 100644 index 0000000..cd818bb --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/saferef.py @@ -0,0 +1,286 @@ +# -*- coding: utf-8 -*- +""" +"Safe weakrefs", originally from pyDispatcher. + +Provides a way to safely weakref any function, including bound methods (which +aren't handled by the core weakref module). +""" +from __future__ import absolute_import + +import sys +import traceback +import weakref + +__all__ = ['safe_ref'] + +PY3 = sys.version_info[0] == 3 + + +def safe_ref(target, on_delete=None): # pragma: no cover + """Return a *safe* weak reference to a callable target + + :param target: the object to be weakly referenced, if it's a + bound method reference, will create a :class:`BoundMethodWeakref`, + otherwise creates a simple :class:`weakref.ref`. + + :keyword on_delete: if provided, will have a hard reference stored + to the callable to be called after the safe reference + goes out of scope with the reference object, (either a + :class:`weakref.ref` or a :class:`BoundMethodWeakref`) as argument. + """ + if getattr(target, '__self__', None) is not None: + # Turn a bound method into a BoundMethodWeakref instance. + # Keep track of these instances for lookup by disconnect(). + assert hasattr(target, '__func__'), \ + """safe_ref target {0!r} has __self__, but no __func__: \ + don't know how to create reference""".format(target) + return get_bound_method_weakref(target=target, + on_delete=on_delete) + if callable(on_delete): + return weakref.ref(target, on_delete) + else: + return weakref.ref(target) + + +class BoundMethodWeakref(object): # pragma: no cover + """'Safe' and reusable weak references to instance methods. + + BoundMethodWeakref objects provide a mechanism for + referencing a bound method without requiring that the + method object itself (which is normally a transient + object) is kept alive. Instead, the BoundMethodWeakref + object keeps weak references to both the object and the + function which together define the instance method. + + .. attribute:: key + + the identity key for the reference, calculated + by the class's :meth:`calculate_key` method applied to the + target instance method + + .. 
attribute:: deletion_methods + + sequence of callable objects taking + single argument, a reference to this object which + will be called when *either* the target object or + target function is garbage collected (i.e. when + this object becomes invalid). These are specified + as the on_delete parameters of :func:`safe_ref` calls. + + .. attribute:: weak_self + + weak reference to the target object + + .. attribute:: weak_fun + + weak reference to the target function + + .. attribute:: _all_instances + + class attribute pointing to all live + BoundMethodWeakref objects indexed by the class's + `calculate_key(target)` method applied to the target + objects. This weak value dictionary is used to + short-circuit creation so that multiple references + to the same (object, function) pair produce the + same BoundMethodWeakref instance. + + """ + + _all_instances = weakref.WeakValueDictionary() + + def __new__(cls, target, on_delete=None, *arguments, **named): + """Create new instance or return current instance + + Basically this method of construction allows us to + short-circuit creation of references to already- + referenced instance methods. The key corresponding + to the target is calculated, and if there is already + an existing reference, that is returned, with its + deletionMethods attribute updated. Otherwise the + new instance is created and registered in the table + of already-referenced methods. + + """ + key = cls.calculate_key(target) + current = cls._all_instances.get(key) + if current is not None: + current.deletion_methods.append(on_delete) + return current + else: + base = super(BoundMethodWeakref, cls).__new__(cls) + cls._all_instances[key] = base + base.__init__(target, on_delete, *arguments, **named) + return base + + def __init__(self, target, on_delete=None): + """Return a weak-reference-like instance for a bound method + + :param target: the instance-method target for the weak + reference, must have `__self__` and `__func__` attributes + and be reconstructable via:: + + target.__func__.__get__(target.__self__) + + which is true of built-in instance methods. + + :keyword on_delete: optional callback which will be called + when this weak reference ceases to be valid + (i.e. either the object or the function is garbage + collected). Should take a single argument, + which will be passed a pointer to this object. + + """ + def remove(weak, self=self): + """Set self.is_dead to true when method or instance is destroyed""" + methods = self.deletion_methods[:] + del(self.deletion_methods[:]) + try: + del(self.__class__._all_instances[self.key]) + except KeyError: + pass + for function in methods: + try: + if callable(function): + function(self) + except Exception as exc: + try: + traceback.print_exc() + except AttributeError: + print('Exception during saferef {0} cleanup function ' + '{1}: {2}'.format(self, function, exc)) + + self.deletion_methods = [on_delete] + self.key = self.calculate_key(target) + self.weak_self = weakref.ref(target.__self__, remove) + self.weak_fun = weakref.ref(target.__func__, remove) + self.self_name = str(target.__self__) + self.fun_name = str(target.__func__.__name__) + + def calculate_key(cls, target): + """Calculate the reference key for this reference + + Currently this is a two-tuple of the `id()`'s of the + target object and the target function respectively. 
+ """ + return id(target.__self__), id(target.__func__) + calculate_key = classmethod(calculate_key) + + def __str__(self): + """Give a friendly representation of the object""" + return '{0}( {1}.{2} )'.format( + type(self).__name__, + self.self_name, + self.fun_name, + ) + + __repr__ = __str__ + + def __bool__(self): + """Whether we are still a valid reference""" + return self() is not None + __nonzero__ = __bool__ # py2 + + if not PY3: + def __cmp__(self, other): + """Compare with another reference""" + if not isinstance(other, self.__class__): + return cmp(self.__class__, type(other)) # noqa + return cmp(self.key, other.key) # noqa + + def __call__(self): + """Return a strong reference to the bound method + + If the target cannot be retrieved, then will + return None, otherwise return a bound instance + method for our object and function. + + Note: + You may call this method any number of times, + as it does not invalidate the reference. + """ + target = self.weak_self() + if target is not None: + function = self.weak_fun() + if function is not None: + return function.__get__(target) + + +class BoundNonDescriptorMethodWeakref(BoundMethodWeakref): # pragma: no cover + """A specialized :class:`BoundMethodWeakref`, for platforms where + instance methods are not descriptors. + + It assumes that the function name and the target attribute name are the + same, instead of assuming that the function is a descriptor. This approach + is equally fast, but not 100% reliable because functions can be stored on + an attribute named differenty than the function's name such as in:: + + >>> class A(object): + ... pass + + >>> def foo(self): + ... return 'foo' + >>> A.bar = foo + + But this shouldn't be a common use case. So, on platforms where methods + aren't descriptors (such as Jython) this implementation has the advantage + of working in the most cases. + + """ + def __init__(self, target, on_delete=None): + """Return a weak-reference-like instance for a bound method + + :param target: the instance-method target for the weak + reference, must have `__self__` and `__func__` attributes + and be reconstructable via:: + + target.__func__.__get__(target.__self__) + + which is true of built-in instance methods. + + :keyword on_delete: optional callback which will be called + when this weak reference ceases to be valid + (i.e. either the object or the function is garbage + collected). Should take a single argument, + which will be passed a pointer to this object. + + """ + assert getattr(target.__self__, target.__name__) == target + super(BoundNonDescriptorMethodWeakref, self).__init__(target, + on_delete) + + def __call__(self): + """Return a strong reference to the bound method + + If the target cannot be retrieved, then will + return None, otherwise return a bound instance + method for our object and function. + + Note: + You may call this method any number of times, + as it does not invalidate the reference. + + """ + target = self.weak_self() + if target is not None: + function = self.weak_fun() + if function is not None: + # Using curry() would be another option, but it erases the + # "signature" of the function. That is, after a function is + # curried, the inspect module can't be used to determine how + # many arguments the function expects, nor what keyword + # arguments it supports, and pydispatcher needs this + # information. 
+ return getattr(target, function.__name__) + + +def get_bound_method_weakref(target, on_delete): # pragma: no cover + """Instantiates the appropiate :class:`BoundMethodWeakRef`, depending + on the details of the underlying class method implementation.""" + if hasattr(target, '__get__'): + # target method is a descriptor, so the default implementation works: + return BoundMethodWeakref(target=target, on_delete=on_delete) + else: + # no luck, use the alternative implementation: + return BoundNonDescriptorMethodWeakref(target=target, + on_delete=on_delete) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/signal.py b/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/signal.py new file mode 100644 index 0000000..7d4b337 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/utils/dispatch/signal.py @@ -0,0 +1,241 @@ +# -*- coding: utf-8 -*- +"""Signal class.""" +from __future__ import absolute_import + +import weakref +from . import saferef + +from celery.five import range +from celery.local import PromiseProxy, Proxy + +__all__ = ['Signal'] + +WEAKREF_TYPES = (weakref.ReferenceType, saferef.BoundMethodWeakref) + + +def _make_id(target): # pragma: no cover + if isinstance(target, Proxy): + target = target._get_current_object() + if hasattr(target, '__func__'): + return (id(target.__self__), id(target.__func__)) + return id(target) + + +class Signal(object): # pragma: no cover + """Base class for all signals + + + .. attribute:: receivers + Internal attribute, holds a dictionary of + `{receiverkey (id): weakref(receiver)}` mappings. + + """ + + def __init__(self, providing_args=None): + """Create a new signal. + + :param providing_args: A list of the arguments this signal can pass + along in a :meth:`send` call. + + """ + self.receivers = [] + if providing_args is None: + providing_args = [] + self.providing_args = set(providing_args) + + def _connect_proxy(self, fun, sender, weak, dispatch_uid): + return self.connect( + fun, sender=sender._get_current_object(), + weak=weak, dispatch_uid=dispatch_uid, + ) + + def connect(self, *args, **kwargs): + """Connect receiver to sender for signal. + + :param receiver: A function or an instance method which is to + receive signals. Receivers must be hashable objects. + + if weak is :const:`True`, then receiver must be weak-referencable + (more precisely :func:`saferef.safe_ref()` must be able to create a + reference to the receiver). + + Receivers must be able to accept keyword arguments. + + If receivers have a `dispatch_uid` attribute, the receiver will + not be added if another receiver already exists with that + `dispatch_uid`. + + :keyword sender: The sender to which the receiver should respond. + Must either be of type :class:`Signal`, or :const:`None` to receive + events from any sender. + + :keyword weak: Whether to use weak references to the receiver. + By default, the module will attempt to use weak references to the + receiver objects. If this parameter is false, then strong + references will be used. + + :keyword dispatch_uid: An identifier used to uniquely identify a + particular instance of a receiver. This will usually be a + string, though it may be anything hashable. 
+ + """ + def _handle_options(sender=None, weak=True, dispatch_uid=None): + + def _connect_signal(fun): + receiver = fun + + if isinstance(sender, PromiseProxy): + sender.__then__( + self._connect_proxy, fun, sender, weak, dispatch_uid, + ) + return fun + + if dispatch_uid: + lookup_key = (dispatch_uid, _make_id(sender)) + else: + lookup_key = (_make_id(receiver), _make_id(sender)) + + if weak: + receiver = saferef.safe_ref( + receiver, on_delete=self._remove_receiver, + ) + + for r_key, _ in self.receivers: + if r_key == lookup_key: + break + else: + self.receivers.append((lookup_key, receiver)) + + return fun + + return _connect_signal + + if args and callable(args[0]): + return _handle_options(*args[1:], **kwargs)(args[0]) + return _handle_options(*args, **kwargs) + + def disconnect(self, receiver=None, sender=None, weak=True, + dispatch_uid=None): + """Disconnect receiver from sender for signal. + + If weak references are used, disconnect need not be called. The + receiver will be removed from dispatch automatically. + + :keyword receiver: The registered receiver to disconnect. May be + none if `dispatch_uid` is specified. + + :keyword sender: The registered sender to disconnect. + + :keyword weak: The weakref state to disconnect. + + :keyword dispatch_uid: the unique identifier of the receiver + to disconnect + + """ + if dispatch_uid: + lookup_key = (dispatch_uid, _make_id(sender)) + else: + lookup_key = (_make_id(receiver), _make_id(sender)) + + for index in range(len(self.receivers)): + (r_key, _) = self.receivers[index] + if r_key == lookup_key: + del self.receivers[index] + break + + def send(self, sender, **named): + """Send signal from sender to all connected receivers. + + If any receiver raises an error, the error propagates back through + send, terminating the dispatch loop, so it is quite possible to not + have all receivers called if a raises an error. + + :param sender: The sender of the signal. Either a specific + object or :const:`None`. + + :keyword \*\*named: Named arguments which will be passed to receivers. + + :returns: a list of tuple pairs: `[(receiver, response), … ]`. + + """ + responses = [] + if not self.receivers: + return responses + + for receiver in self._live_receivers(_make_id(sender)): + response = receiver(signal=self, sender=sender, **named) + responses.append((receiver, response)) + return responses + + def send_robust(self, sender, **named): + """Send signal from sender to all connected receivers catching errors. + + :param sender: The sender of the signal. Can be any python object + (normally one registered with a connect if you actually want + something to occur). + + :keyword \*\*named: Named arguments which will be passed to receivers. + These arguments must be a subset of the argument names defined in + :attr:`providing_args`. + + :returns: a list of tuple pairs: `[(receiver, response), … ]`. + + :raises DispatcherKeyError: + + if any receiver raises an error (specifically any subclass of + :exc:`Exception`), the error instance is returned as the result + for that receiver. + + """ + responses = [] + if not self.receivers: + return responses + + # Call each receiver with whatever arguments it can accept. + # Return a list of tuple pairs [(receiver, response), … ]. 
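For orientation (this sketch is not part of the vendored signal.py above), a minimal example of how this Signal class is typically used; the signal, receiver and keyword-argument names below are illustrative only:

from celery.utils.dispatch import Signal

post_tagging = Signal(providing_args=['tag'])

def receiver(sender=None, tag=None, **kwargs):
    # receivers must accept keyword arguments, as noted in connect()
    print('got tag %r from sender %r' % (tag, sender))

post_tagging.connect(receiver)
responses = post_tagging.send(sender=None, tag='celery')  # [(receiver, None)]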
+ for receiver in self._live_receivers(_make_id(sender)):
+ try:
+ response = receiver(signal=self, sender=sender, **named)
+ except Exception as err:
+ responses.append((receiver, err))
+ else:
+ responses.append((receiver, response))
+ return responses
+
+ def _live_receivers(self, senderkey):
+ """Filter sequence of receivers to get resolved, live receivers.
+
+ This checks for weak references and resolves them, then returning only
+ live receivers.
+
+ """
+ none_senderkey = _make_id(None)
+ receivers = []
+
+ for (receiverkey, r_senderkey), receiver in self.receivers:
+ if r_senderkey == none_senderkey or r_senderkey == senderkey:
+ if isinstance(receiver, WEAKREF_TYPES):
+ # Dereference the weak reference.
+ receiver = receiver()
+ if receiver is not None:
+ receivers.append(receiver)
+ else:
+ receivers.append(receiver)
+ return receivers
+
+ def _remove_receiver(self, receiver):
+ """Remove dead receivers from connections."""
+
+ to_remove = []
+ for key, connected_receiver in self.receivers:
+ if connected_receiver == receiver:
+ to_remove.append(key)
+ for key in to_remove:
+ for idx, (r_key, _) in enumerate(self.receivers):
+ if r_key == key:
+ del self.receivers[idx]
+
+ def __repr__(self):
+ return '<Signal: {0}>'.format(type(self).__name__)
+
+ __str__ = __repr__
diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/encoding.py b/thesisenv/lib/python3.6/site-packages/celery/utils/encoding.py
new file mode 100644
index 0000000..3ddcd35
--- /dev/null
+++ b/thesisenv/lib/python3.6/site-packages/celery/utils/encoding.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+"""
+ celery.utils.encoding
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ This module has moved to :mod:`kombu.utils.encoding`.
+
+"""
+from __future__ import absolute_import
+
+from kombu.utils.encoding import ( # noqa
+ default_encode, default_encoding, bytes_t, bytes_to_str, str_t,
+ str_to_bytes, ensure_bytes, from_utf8, safe_str, safe_repr,
+)
diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/functional.py b/thesisenv/lib/python3.6/site-packages/celery/utils/functional.py
new file mode 100644
index 0000000..e55b812
--- /dev/null
+++ b/thesisenv/lib/python3.6/site-packages/celery/utils/functional.py
@@ -0,0 +1,323 @@
+# -*- coding: utf-8 -*-
+"""
+ celery.utils.functional
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Utilities for functions.
+
+"""
+from __future__ import absolute_import
+
+import sys
+import threading
+
+from functools import wraps
+from itertools import islice
+
+from kombu.utils import cached_property
+from kombu.utils.functional import lazy, maybe_evaluate, is_list, maybe_list
+from kombu.utils.compat import OrderedDict
+
+from celery.five import UserDict, UserList, items, keys, range
+
+__all__ = ['LRUCache', 'is_list', 'maybe_list', 'memoize', 'mlazy', 'noop',
+ 'first', 'firstmethod', 'chunks', 'padlist', 'mattrgetter', 'uniq',
+ 'regen', 'dictfilter', 'lazy', 'maybe_evaluate']
+
+IS_PYPY = hasattr(sys, 'pypy_version_info')
+
+KEYWORD_MARK = object()
+
+
+class DummyContext(object):
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *exc_info):
+ pass
+
+
+class LRUCache(UserDict):
+ """LRU Cache implementation using a doubly linked list to track access.
+
+ :keyword limit: The maximum number of keys to keep in the cache.
+ When a new key is inserted and the limit has been exceeded,
+ the *Least Recently Used* key will be discarded from the
+ cache.
+ + """ + + def __init__(self, limit=None): + self.limit = limit + self.mutex = threading.RLock() + self.data = OrderedDict() + + def __getitem__(self, key): + with self.mutex: + value = self[key] = self.data.pop(key) + return value + + def update(self, *args, **kwargs): + with self.mutex: + data, limit = self.data, self.limit + data.update(*args, **kwargs) + if limit and len(data) > limit: + # pop additional items in case limit exceeded + for _ in range(len(data) - limit): + data.popitem(last=False) + + def popitem(self, last=True): + with self.mutex: + return self.data.popitem(last) + + def __setitem__(self, key, value): + # remove least recently used key. + with self.mutex: + if self.limit and len(self.data) >= self.limit: + self.data.pop(next(iter(self.data))) + self.data[key] = value + + def __iter__(self): + return iter(self.data) + + def _iterate_items(self, _need_lock=IS_PYPY): + with self.mutex if _need_lock else DummyContext(): + for k in self: + try: + yield (k, self.data[k]) + except KeyError: # pragma: no cover + pass + iteritems = _iterate_items + + def _iterate_values(self, _need_lock=IS_PYPY): + with self.mutex if _need_lock else DummyContext(): + for k in self: + try: + yield self.data[k] + except KeyError: # pragma: no cover + pass + + itervalues = _iterate_values + + def _iterate_keys(self): + # userdict.keys in py3k calls __getitem__ + return keys(self.data) + iterkeys = _iterate_keys + + def incr(self, key, delta=1): + with self.mutex: + # this acts as memcached does- store as a string, but return a + # integer as long as it exists and we can cast it + newval = int(self.data.pop(key)) + delta + self[key] = str(newval) + return newval + + def __getstate__(self): + d = dict(vars(self)) + d.pop('mutex') + return d + + def __setstate__(self, state): + self.__dict__ = state + self.mutex = threading.RLock() + + if sys.version_info[0] == 3: # pragma: no cover + keys = _iterate_keys + values = _iterate_values + items = _iterate_items + else: # noqa + + def keys(self): + return list(self._iterate_keys()) + + def values(self): + return list(self._iterate_values()) + + def items(self): + return list(self._iterate_items()) + + +def memoize(maxsize=None, keyfun=None, Cache=LRUCache): + + def _memoize(fun): + mutex = threading.Lock() + cache = Cache(limit=maxsize) + + @wraps(fun) + def _M(*args, **kwargs): + if keyfun: + key = keyfun(args, kwargs) + else: + key = args + (KEYWORD_MARK, ) + tuple(sorted(kwargs.items())) + try: + with mutex: + value = cache[key] + except KeyError: + value = fun(*args, **kwargs) + _M.misses += 1 + with mutex: + cache[key] = value + else: + _M.hits += 1 + return value + + def clear(): + """Clear the cache and reset cache statistics.""" + cache.clear() + _M.hits = _M.misses = 0 + + _M.hits = _M.misses = 0 + _M.clear = clear + _M.original_func = fun + return _M + + return _memoize + + +class mlazy(lazy): + """Memoized lazy evaluation. + + The function is only evaluated once, every subsequent access + will return the same value. + + .. attribute:: evaluated + + Set to to :const:`True` after the object has been evaluated. + + """ + evaluated = False + _value = None + + def evaluate(self): + if not self.evaluated: + self._value = super(mlazy, self).evaluate() + self.evaluated = True + return self._value + + +def noop(*args, **kwargs): + """No operation. + + Takes any arguments/keyword arguments and does nothing. + + """ + pass + + +def first(predicate, it): + """Return the first element in `iterable` that `predicate` Gives a + :const:`True` value for. 
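A small usage sketch (not part of the patched file) for the memoize decorator and LRUCache defined above; the fib function and the maxsize value are made up for illustration:

from celery.utils.functional import memoize

@memoize(maxsize=100)
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

fib(30)   # computed and stored in the LRUCache
fib(30)   # answered from the cache; fib.hits is incremented, fib.clear() resets it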
+ + If `predicate` is None it will return the first item that is not None. + + """ + return next( + (v for v in it if (predicate(v) if predicate else v is not None)), + None, + ) + + +def firstmethod(method): + """Return a function that with a list of instances, + finds the first instance that gives a value for the given method. + + The list can also contain lazy instances + (:class:`~kombu.utils.functional.lazy`.) + + """ + + def _matcher(it, *args, **kwargs): + for obj in it: + try: + answer = getattr(maybe_evaluate(obj), method)(*args, **kwargs) + except AttributeError: + pass + else: + if answer is not None: + return answer + + return _matcher + + +def chunks(it, n): + """Split an iterator into chunks with `n` elements each. + + Examples + + # n == 2 + >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 2) + >>> list(x) + [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10]] + + # n == 3 + >>> x = chunks(iter([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), 3) + >>> list(x) + [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10]] + + """ + # XXX This function is not used anymore, at least not by Celery itself. + for first in it: + yield [first] + list(islice(it, n - 1)) + + +def padlist(container, size, default=None): + """Pad list with default elements. + + Examples: + + >>> first, last, city = padlist(['George', 'Costanza', 'NYC'], 3) + ('George', 'Costanza', 'NYC') + >>> first, last, city = padlist(['George', 'Costanza'], 3) + ('George', 'Costanza', None) + >>> first, last, city, planet = padlist( + ... ['George', 'Costanza', 'NYC'], 4, default='Earth', + ... ) + ('George', 'Costanza', 'NYC', 'Earth') + + """ + return list(container)[:size] + [default] * (size - len(container)) + + +def mattrgetter(*attrs): + """Like :func:`operator.itemgetter` but return :const:`None` on missing + attributes instead of raising :exc:`AttributeError`.""" + return lambda obj: dict((attr, getattr(obj, attr, None)) + for attr in attrs) + + +def uniq(it): + """Return all unique elements in ``it``, preserving order.""" + seen = set() + return (seen.add(obj) or obj for obj in it if obj not in seen) + + +def regen(it): + """Regen takes any iterable, and if the object is an + generator it will cache the evaluated list on first access, + so that the generator can be "consumed" multiple times.""" + if isinstance(it, (list, tuple)): + return it + return _regen(it) + + +class _regen(UserList, list): + # must be subclass of list so that json can encode. + def __init__(self, it): + self.__it = it + + def __reduce__(self): + return list, (self.data, ) + + def __length_hint__(self): + return self.__it.__length_hint__() + + @cached_property + def data(self): + return list(self.__it) + + +def dictfilter(d=None, **kw): + """Remove all keys from dict ``d`` whose value is :const:`None`""" + d = kw if d is None else (dict(d, **kw) if kw else d) + return dict((k, v) for k, v in items(d) if v is not None) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/imports.py b/thesisenv/lib/python3.6/site-packages/celery/utils/imports.py new file mode 100644 index 0000000..22a2fdc --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/utils/imports.py @@ -0,0 +1,114 @@ +# -*- coding: utf-8 -*- +""" + celery.utils.import + ~~~~~~~~~~~~~~~~~~~ + + Utilities related to importing modules and symbols by name. 
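Illustrative calls to a few of the functional helpers above (uniq, dictfilter and regen); the values are made up:

from celery.utils.functional import dictfilter, regen, uniq

list(uniq([1, 2, 2, 3, 1]))        # -> [1, 2, 3]
dictfilter({'a': 1, 'b': None})    # -> {'a': 1}
g = regen(x * 2 for x in range(3))
list(g) == list(g) == [0, 2, 4]    # the generator's output is cached on first access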
+ +""" +from __future__ import absolute_import + +import imp as _imp +import importlib +import os +import sys + +from contextlib import contextmanager + +from kombu.utils import symbol_by_name + +from celery.five import reload + +__all__ = [ + 'NotAPackage', 'qualname', 'instantiate', 'symbol_by_name', 'cwd_in_path', + 'find_module', 'import_from_cwd', 'reload_from_cwd', 'module_file', +] + + +class NotAPackage(Exception): + pass + + +if sys.version_info > (3, 3): # pragma: no cover + def qualname(obj): + if not hasattr(obj, '__name__') and hasattr(obj, '__class__'): + obj = obj.__class__ + q = getattr(obj, '__qualname__', None) + if '.' not in q: + q = '.'.join((obj.__module__, q)) + return q +else: + def qualname(obj): # noqa + if not hasattr(obj, '__name__') and hasattr(obj, '__class__'): + obj = obj.__class__ + return '.'.join((obj.__module__, obj.__name__)) + + +def instantiate(name, *args, **kwargs): + """Instantiate class by name. + + See :func:`symbol_by_name`. + + """ + return symbol_by_name(name)(*args, **kwargs) + + +@contextmanager +def cwd_in_path(): + cwd = os.getcwd() + if cwd in sys.path: + yield + else: + sys.path.insert(0, cwd) + try: + yield cwd + finally: + try: + sys.path.remove(cwd) + except ValueError: # pragma: no cover + pass + + +def find_module(module, path=None, imp=None): + """Version of :func:`imp.find_module` supporting dots.""" + if imp is None: + imp = importlib.import_module + with cwd_in_path(): + if '.' in module: + last = None + parts = module.split('.') + for i, part in enumerate(parts[:-1]): + mpart = imp('.'.join(parts[:i + 1])) + try: + path = mpart.__path__ + except AttributeError: + raise NotAPackage(module) + last = _imp.find_module(parts[i + 1], path) + return last + return _imp.find_module(module) + + +def import_from_cwd(module, imp=None, package=None): + """Import module, but make sure it finds modules + located in the current directory. + + Modules located in the current directory has + precedence over modules located in `sys.path`. 
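A short sketch of the import helpers above; 'local_settings' is a hypothetical module assumed to live in the current working directory, and the LRUCache path is just an example target for instantiate():

from celery.utils.imports import import_from_cwd, instantiate

settings = import_from_cwd('local_settings')   # importable even if cwd is not on sys.path
cache = instantiate('celery.utils.functional.LRUCache', limit=10)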
+
+"""
+ if imp is None:
+ imp = importlib.import_module
+ with cwd_in_path():
+ return imp(module, package=package)
+
+
+def reload_from_cwd(module, reloader=None):
+ if reloader is None:
+ reloader = reload
+ with cwd_in_path():
+ return reloader(module)
+
+
+def module_file(module):
+ """Return the correct original file name of a module."""
+ name = module.__file__
+ return name[:-1] if name.endswith('.pyc') else name
diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/iso8601.py b/thesisenv/lib/python3.6/site-packages/celery/utils/iso8601.py
new file mode 100644
index 0000000..c951cf6
--- /dev/null
+++ b/thesisenv/lib/python3.6/site-packages/celery/utils/iso8601.py
@@ -0,0 +1,77 @@
+"""
+Originally taken from pyiso8601 (http://code.google.com/p/pyiso8601/)
+
+Modified to match the behavior of dateutil.parser:
+
+ - raise ValueError instead of ParseError
+ - return naive datetimes by default
+ - uses pytz.FixedOffset
+
+This is the original License:
+
+Copyright (c) 2007 Michael Twomey
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+"""
+from __future__ import absolute_import
+
+import re
+
+from datetime import datetime
+from pytz import FixedOffset
+
+__all__ = ['parse_iso8601']
+
+# Adapted from http://delete.me.uk/2005/03/iso8601.html
+ISO8601_REGEX = re.compile(
+ r'(?P<year>[0-9]{4})(-(?P<month>[0-9]{1,2})(-(?P<day>[0-9]{1,2})'
+ r'((?P<separator>.)(?P<hour>[0-9]{2}):(?P<minute>[0-9]{2})'
+ '(:(?P<second>[0-9]{2})(\.(?P<fraction>[0-9]+))?)?'
+ r'(?P<timezone>Z|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?'
+)
+TIMEZONE_REGEX = re.compile(
+ '(?P<prefix>[+-])(?P<hours>[0-9]{2}).(?P<minutes>[0-9]{2})'
+)
+
+
+def parse_iso8601(datestring):
+ """Parse and convert ISO 8601 string into a datetime object"""
+ m = ISO8601_REGEX.match(datestring)
+ if not m:
+ raise ValueError('unable to parse date string %r' % datestring)
+ groups = m.groupdict()
+ tz = groups['timezone']
+ if tz == 'Z':
+ tz = FixedOffset(0)
+ elif tz:
+ m = TIMEZONE_REGEX.match(tz)
+ prefix, hours, minutes = m.groups()
+ hours, minutes = int(hours), int(minutes)
+ if prefix == '-':
+ hours = -hours
+ minutes = -minutes
+ tz = FixedOffset(minutes + hours * 60)
+ frac = groups['fraction'] or 0
+ return datetime(
+ int(groups['year']), int(groups['month']), int(groups['day']),
+ int(groups['hour']), int(groups['minute']), int(groups['second']),
+ int(frac), tz
+ )
diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/log.py b/thesisenv/lib/python3.6/site-packages/celery/utils/log.py
new file mode 100644
index 0000000..b786d39
--- /dev/null
+++ b/thesisenv/lib/python3.6/site-packages/celery/utils/log.py
@@ -0,0 +1,301 @@
+# -*- coding: utf-8 -*-
+"""
+ celery.utils.log
+ ~~~~~~~~~~~~~~~~
+
+ Logging utilities.
+
+"""
+from __future__ import absolute_import, print_function
+
+import logging
+import numbers
+import os
+import sys
+import threading
+import traceback
+
+from contextlib import contextmanager
+from billiard import current_process, util as mputil
+from kombu.five import values
+from kombu.log import get_logger as _get_logger, LOG_LEVELS
+from kombu.utils.encoding import safe_str
+
+from celery.five import string_t, text_t
+
+from .term import colored
+
+__all__ = ['ColorFormatter', 'LoggingProxy', 'base_logger',
+ 'set_in_sighandler', 'in_sighandler', 'get_logger',
+ 'get_task_logger', 'mlevel', 'ensure_process_aware_logger',
+ 'get_multiprocessing_logger', 'reset_multiprocessing_logger']
+
+_process_aware = False
+PY3 = sys.version_info[0] == 3
+
+MP_LOG = os.environ.get('MP_LOG', False)
+
+
+# Sets up our logging hierarchy.
+#
+# Every logger in the celery package inherits from the "celery"
+# logger, and every task logger inherits from the "celery.task"
+# logger.
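Returning to the iso8601 helper added above, a small usage sketch (the timestamp is an arbitrary example):

from celery.utils.iso8601 import parse_iso8601

parse_iso8601('2018-10-22T18:58:10+02:00')
# -> datetime.datetime(2018, 10, 22, 18, 58, 10, tzinfo=pytz.FixedOffset(120))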
+base_logger = logger = _get_logger('celery') +mp_logger = _get_logger('multiprocessing') + +_in_sighandler = False + + +def set_in_sighandler(value): + global _in_sighandler + _in_sighandler = value + + +def iter_open_logger_fds(): + seen = set() + loggers = (list(values(logging.Logger.manager.loggerDict)) + + [logging.getLogger(None)]) + for logger in loggers: + try: + for handler in logger.handlers: + try: + if handler not in seen: + yield handler.stream + seen.add(handler) + except AttributeError: + pass + except AttributeError: # PlaceHolder does not have handlers + pass + + +@contextmanager +def in_sighandler(): + set_in_sighandler(True) + try: + yield + finally: + set_in_sighandler(False) + + +def logger_isa(l, p, max=1000): + this, seen = l, set() + for _ in range(max): + if this == p: + return True + else: + if this in seen: + raise RuntimeError( + 'Logger {0!r} parents recursive'.format(l), + ) + seen.add(this) + this = this.parent + if not this: + break + else: + raise RuntimeError('Logger hierarchy exceeds {0}'.format(max)) + return False + + +def get_logger(name): + l = _get_logger(name) + if logging.root not in (l, l.parent) and l is not base_logger: + if not logger_isa(l, base_logger): + l.parent = base_logger + return l +task_logger = get_logger('celery.task') +worker_logger = get_logger('celery.worker') + + +def get_task_logger(name): + logger = get_logger(name) + if not logger_isa(logger, task_logger): + logger.parent = task_logger + return logger + + +def mlevel(level): + if level and not isinstance(level, numbers.Integral): + return LOG_LEVELS[level.upper()] + return level + + +class ColorFormatter(logging.Formatter): + #: Loglevel -> Color mapping. + COLORS = colored().names + colors = {'DEBUG': COLORS['blue'], 'WARNING': COLORS['yellow'], + 'ERROR': COLORS['red'], 'CRITICAL': COLORS['magenta']} + + def __init__(self, fmt=None, use_color=True): + logging.Formatter.__init__(self, fmt) + self.use_color = use_color + + def formatException(self, ei): + if ei and not isinstance(ei, tuple): + ei = sys.exc_info() + r = logging.Formatter.formatException(self, ei) + if isinstance(r, str) and not PY3: + return safe_str(r) + return r + + def format(self, record): + msg = logging.Formatter.format(self, record) + color = self.colors.get(record.levelname) + + # reset exception info later for other handlers... + einfo = sys.exc_info() if record.exc_info == 1 else record.exc_info + + if color and self.use_color: + try: + # safe_str will repr the color object + # and color will break on non-string objects + # so need to reorder calls based on type. + # Issue #427 + try: + if isinstance(msg, string_t): + return text_t(color(safe_str(msg))) + return safe_str(color(msg)) + except UnicodeDecodeError: + return safe_str(msg) # skip colors + except Exception as exc: + prev_msg, record.exc_info, record.msg = ( + record.msg, 1, ''.format( + type(msg), exc + ), + ) + try: + return logging.Formatter.format(self, record) + finally: + record.msg, record.exc_info = prev_msg, einfo + else: + return safe_str(msg) + + +class LoggingProxy(object): + """Forward file object to :class:`logging.Logger` instance. + + :param logger: The :class:`logging.Logger` instance to forward to. + :param loglevel: Loglevel to use when writing messages. 
+ + """ + mode = 'w' + name = None + closed = False + loglevel = logging.ERROR + _thread = threading.local() + + def __init__(self, logger, loglevel=None): + self.logger = logger + self.loglevel = mlevel(loglevel or self.logger.level or self.loglevel) + self._safewrap_handlers() + + def _safewrap_handlers(self): + """Make the logger handlers dump internal errors to + `sys.__stderr__` instead of `sys.stderr` to circumvent + infinite loops.""" + + def wrap_handler(handler): # pragma: no cover + + class WithSafeHandleError(logging.Handler): + + def handleError(self, record): + exc_info = sys.exc_info() + try: + try: + traceback.print_exception(exc_info[0], + exc_info[1], + exc_info[2], + None, sys.__stderr__) + except IOError: + pass # see python issue 5971 + finally: + del(exc_info) + + handler.handleError = WithSafeHandleError().handleError + return [wrap_handler(h) for h in self.logger.handlers] + + def write(self, data): + """Write message to logging object.""" + if _in_sighandler: + return print(safe_str(data), file=sys.__stderr__) + if getattr(self._thread, 'recurse_protection', False): + # Logger is logging back to this file, so stop recursing. + return + data = data.strip() + if data and not self.closed: + self._thread.recurse_protection = True + try: + self.logger.log(self.loglevel, safe_str(data)) + finally: + self._thread.recurse_protection = False + + def writelines(self, sequence): + """`writelines(sequence_of_strings) -> None`. + + Write the strings to the file. + + The sequence can be any iterable object producing strings. + This is equivalent to calling :meth:`write` for each string. + + """ + for part in sequence: + self.write(part) + + def flush(self): + """This object is not buffered so any :meth:`flush` requests + are ignored.""" + pass + + def close(self): + """When the object is closed, no write requests are forwarded to + the logging object anymore.""" + self.closed = True + + def isatty(self): + """Always return :const:`False`. 
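A sketch of what LoggingProxy is used for in the worker: redirecting a file-like stream into a logger. The logger name and level below are illustrative, not taken from the patch:

import logging
import sys

from celery.utils.log import LoggingProxy, get_logger

# anything written to stdout is now forwarded to the logger at INFO level
sys.stdout = LoggingProxy(get_logger('celery.redirected'), logging.INFO)
print('this ends up in the log instead of the real stdout')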
Just here for file support.""" + return False + + +def ensure_process_aware_logger(force=False): + """Make sure process name is recorded when loggers are used.""" + global _process_aware + if force or not _process_aware: + logging._acquireLock() + try: + _process_aware = True + Logger = logging.getLoggerClass() + if getattr(Logger, '_process_aware', False): # pragma: no cover + return + + class ProcessAwareLogger(Logger): + _signal_safe = True + _process_aware = True + + def makeRecord(self, *args, **kwds): + record = Logger.makeRecord(self, *args, **kwds) + record.processName = current_process()._name + return record + + def log(self, *args, **kwargs): + if _in_sighandler: + return + return Logger.log(self, *args, **kwargs) + logging.setLoggerClass(ProcessAwareLogger) + finally: + logging._releaseLock() + + +def get_multiprocessing_logger(): + return mputil.get_logger() if mputil else None + + +def reset_multiprocessing_logger(): + if mputil and hasattr(mputil, '_logger'): + mputil._logger = None + + +def current_process_index(base=1): + if current_process: + index = getattr(current_process(), 'index', None) + return index + base if index is not None else index +ensure_process_aware_logger() diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/mail.py b/thesisenv/lib/python3.6/site-packages/celery/utils/mail.py new file mode 100644 index 0000000..00c5f29 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/utils/mail.py @@ -0,0 +1,190 @@ +# -*- coding: utf-8 -*- +""" + celery.utils.mail + ~~~~~~~~~~~~~~~~~ + + How task error emails are formatted and sent. + +""" +from __future__ import absolute_import + +import smtplib +import socket +import traceback +import warnings + +from email.mime.text import MIMEText + +from .functional import maybe_list + +try: + from ssl import SSLError +except ImportError: # pragma: no cover + class SSLError(Exception): # noqa + """fallback used when ssl module not compiled.""" + +__all__ = ['SendmailWarning', 'Message', 'Mailer', 'ErrorMail'] + +_local_hostname = None + + +def get_local_hostname(): + global _local_hostname + if _local_hostname is None: + _local_hostname = socket.getfqdn() + return _local_hostname + + +class SendmailWarning(UserWarning): + """Problem happened while sending the email message.""" + + +class Message(object): + + def __init__(self, to=None, sender=None, subject=None, + body=None, charset='us-ascii'): + self.to = maybe_list(to) + self.sender = sender + self.subject = subject + self.body = body + self.charset = charset + + def __repr__(self): + return ''.format(self) + + def __str__(self): + msg = MIMEText(self.body, 'plain', self.charset) + msg['Subject'] = self.subject + msg['From'] = self.sender + msg['To'] = ', '.join(self.to) + return msg.as_string() + + +class Mailer(object): + + def __init__(self, host='localhost', port=0, user=None, password=None, + timeout=2, use_ssl=False, use_tls=False): + self.host = host + self.port = port + self.user = user + self.password = password + self.timeout = timeout + self.use_ssl = use_ssl + self.use_tls = use_tls + + def send(self, message, fail_silently=False, **kwargs): + try: + self._send(message, **kwargs) + except Exception as exc: + if not fail_silently: + raise + warnings.warn(SendmailWarning( + 'Mail could not be sent: {0!r} {1!r}\n{2!r}'.format( + exc, {'To': ', '.join(message.to), + 'Subject': message.subject}, + traceback.format_stack()))) + + def _send(self, message, **kwargs): + Client = smtplib.SMTP_SSL if self.use_ssl else smtplib.SMTP + client = 
Client(self.host, self.port, timeout=self.timeout, + local_hostname=get_local_hostname(), **kwargs) + + if self.use_tls: + client.ehlo() + client.starttls() + client.ehlo() + + if self.user and self.password: + client.login(self.user, self.password) + + client.sendmail(message.sender, message.to, str(message)) + try: + client.quit() + except SSLError: + client.close() + + +class ErrorMail(object): + """Defines how and when task error e-mails should be sent. + + :param task: The task instance that raised the error. + + :attr:`subject` and :attr:`body` are format strings which + are passed a context containing the following keys: + + * name + + Name of the task. + + * id + + UUID of the task. + + * exc + + String representation of the exception. + + * args + + Positional arguments. + + * kwargs + + Keyword arguments. + + * traceback + + String representation of the traceback. + + * hostname + + Worker nodename. + + """ + + # pep8.py borks on a inline signature separator and + # says "trailing whitespace" ;) + EMAIL_SIGNATURE_SEP = '-- ' + + #: Format string used to generate error email subjects. + subject = """\ + [{hostname}] Error: Task {name} ({id}): {exc!r} + """ + + #: Format string used to generate error email content. + body = """ +Task {{name}} with id {{id}} raised exception:\n{{exc!r}} + + +Task was called with args: {{args}} kwargs: {{kwargs}}. + +The contents of the full traceback was: + +{{traceback}} + +{EMAIL_SIGNATURE_SEP} +Just to let you know, +py-celery at {{hostname}}. +""".format(EMAIL_SIGNATURE_SEP=EMAIL_SIGNATURE_SEP) + + def __init__(self, task, **kwargs): + self.task = task + self.subject = kwargs.get('subject', self.subject) + self.body = kwargs.get('body', self.body) + + def should_send(self, context, exc): + """Return true or false depending on if a task error mail + should be sent for this type of error.""" + return True + + def format_subject(self, context): + return self.subject.strip().format(**context) + + def format_body(self, context): + return self.body.strip().format(**context) + + def send(self, context, exc, fail_silently=True): + if self.should_send(context, exc): + self.task.app.mail_admins(self.format_subject(context), + self.format_body(context), + fail_silently=fail_silently) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/objects.py b/thesisenv/lib/python3.6/site-packages/celery/utils/objects.py new file mode 100644 index 0000000..8a2f7f6 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/utils/objects.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- +""" + celery.utils.objects + ~~~~~~~~~~~~~~~~~~~~ + + Object related utilities including introspection, etc. + +""" +from __future__ import absolute_import + +__all__ = ['mro_lookup'] + + +class Bunch(object): + """Object that enables you to modify attributes.""" + + def __init__(self, **kwargs): + self.__dict__.update(kwargs) + + +def mro_lookup(cls, attr, stop=(), monkey_patched=[]): + """Return the first node by MRO order that defines an attribute. + + :keyword stop: A list of types that if reached will stop the search. + :keyword monkey_patched: Use one of the stop classes if the attr's + module origin is not in this list, this to detect monkey patched + attributes. + + :returns None: if the attribute was not found. 
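An illustrative use of mro_lookup as documented above; Base and Child are made-up classes:

from celery.utils.objects import mro_lookup

class Base(object):
    def handler(self):
        pass

class Child(Base):
    pass

mro_lookup(Child, 'handler')   # -> Base, the first class in the MRO defining it
mro_lookup(Child, 'missing')   # -> None, attribute not found anywhere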
+ + """ + for node in cls.mro(): + if node in stop: + try: + attr = node.__dict__[attr] + module_origin = attr.__module__ + except (AttributeError, KeyError): + pass + else: + if module_origin not in monkey_patched: + return node + return + if attr in node.__dict__: + return node + + +class FallbackContext(object): + """The built-in ``@contextmanager`` utility does not work well + when wrapping other contexts, as the traceback is wrong when + the wrapped context raises. + + This solves this problem and can be used instead of ``@contextmanager`` + in this example:: + + @contextmanager + def connection_or_default_connection(connection=None): + if connection: + # user already has a connection, should not close + # after use + yield connection + else: + # must have new connection, and also close the connection + # after the block returns + with create_new_connection() as connection: + yield connection + + This wrapper can be used instead for the above like this:: + + def connection_or_default_connection(connection=None): + return FallbackContext(connection, create_new_connection) + + """ + + def __init__(self, provided, fallback, *fb_args, **fb_kwargs): + self.provided = provided + self.fallback = fallback + self.fb_args = fb_args + self.fb_kwargs = fb_kwargs + self._context = None + + def __enter__(self): + if self.provided is not None: + return self.provided + context = self._context = self.fallback( + *self.fb_args, **self.fb_kwargs + ).__enter__() + return context + + def __exit__(self, *exc_info): + if self._context is not None: + return self._context.__exit__(*exc_info) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/serialization.py b/thesisenv/lib/python3.6/site-packages/celery/utils/serialization.py new file mode 100644 index 0000000..d5509f1 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/utils/serialization.py @@ -0,0 +1,167 @@ +# -*- coding: utf-8 -*- +""" + celery.utils.serialization + ~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Utilities for safely pickling exceptions. + +""" +from __future__ import absolute_import + +from inspect import getmro +from itertools import takewhile + +try: + import cPickle as pickle +except ImportError: + import pickle # noqa + +from .encoding import safe_repr + +__all__ = ['UnpickleableExceptionWrapper', 'subclass_exception', + 'find_pickleable_exception', 'create_exception_cls', + 'get_pickleable_exception', 'get_pickleable_etype', + 'get_pickled_exception'] + +#: List of base classes we probably don't want to reduce to. +try: + unwanted_base_classes = (StandardError, Exception, BaseException, object) +except NameError: # pragma: no cover + unwanted_base_classes = (Exception, BaseException, object) # py3k + + +def subclass_exception(name, parent, module): # noqa + return type(name, (parent, ), {'__module__': module}) + + +def find_pickleable_exception(exc, loads=pickle.loads, + dumps=pickle.dumps): + """With an exception instance, iterate over its super classes (by mro) + and find the first super exception that is pickleable. It does + not go below :exc:`Exception` (i.e. it skips :exc:`Exception`, + :class:`BaseException` and :class:`object`). If that happens + you should use :exc:`UnpickleableException` instead. + + :param exc: An exception instance. + + Will return the nearest pickleable parent exception class + (except :exc:`Exception` and parents), or if the exception is + pickleable it will return :const:`None`. 
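A sketch of how the serialization helpers above are meant to be used; CustomError carrying a lambda in its args is a contrived, unpickleable example:

import pickle

from celery.utils.serialization import (
    get_pickleable_exception, get_pickled_exception,
)

class CustomError(Exception):
    pass

exc = CustomError(lambda: 42)           # args contain an unpickleable lambda
safe = get_pickleable_exception(exc)    # falls back to UnpickleableExceptionWrapper
data = pickle.dumps(safe)               # now round-trips through pickle
restored = get_pickled_exception(pickle.loads(data))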
+ + :rtype :exc:`Exception`: + + """ + exc_args = getattr(exc, 'args', []) + for supercls in itermro(exc.__class__, unwanted_base_classes): + try: + superexc = supercls(*exc_args) + loads(dumps(superexc)) + except: + pass + else: + return superexc +find_nearest_pickleable_exception = find_pickleable_exception # XXX compat + + +def itermro(cls, stop): + return takewhile(lambda sup: sup not in stop, getmro(cls)) + + +def create_exception_cls(name, module, parent=None): + """Dynamically create an exception class.""" + if not parent: + parent = Exception + return subclass_exception(name, parent, module) + + +class UnpickleableExceptionWrapper(Exception): + """Wraps unpickleable exceptions. + + :param exc_module: see :attr:`exc_module`. + :param exc_cls_name: see :attr:`exc_cls_name`. + :param exc_args: see :attr:`exc_args` + + **Example** + + .. code-block:: python + + >>> def pickle_it(raising_function): + ... try: + ... raising_function() + ... except Exception as e: + ... exc = UnpickleableExceptionWrapper( + ... e.__class__.__module__, + ... e.__class__.__name__, + ... e.args, + ... ) + ... pickle.dumps(exc) # Works fine. + + """ + + #: The module of the original exception. + exc_module = None + + #: The name of the original exception class. + exc_cls_name = None + + #: The arguments for the original exception. + exc_args = None + + def __init__(self, exc_module, exc_cls_name, exc_args, text=None): + safe_exc_args = [] + for arg in exc_args: + try: + pickle.dumps(arg) + safe_exc_args.append(arg) + except Exception: + safe_exc_args.append(safe_repr(arg)) + self.exc_module = exc_module + self.exc_cls_name = exc_cls_name + self.exc_args = safe_exc_args + self.text = text + Exception.__init__(self, exc_module, exc_cls_name, safe_exc_args, text) + + def restore(self): + return create_exception_cls(self.exc_cls_name, + self.exc_module)(*self.exc_args) + + def __str__(self): + return self.text + + @classmethod + def from_exception(cls, exc): + return cls(exc.__class__.__module__, + exc.__class__.__name__, + getattr(exc, 'args', []), + safe_repr(exc)) + + +def get_pickleable_exception(exc): + """Make sure exception is pickleable.""" + try: + pickle.loads(pickle.dumps(exc)) + except Exception: + pass + else: + return exc + nearest = find_pickleable_exception(exc) + if nearest: + return nearest + return UnpickleableExceptionWrapper.from_exception(exc) + + +def get_pickleable_etype(cls, loads=pickle.loads, dumps=pickle.dumps): + try: + loads(dumps(cls)) + except: + return Exception + else: + return cls + + +def get_pickled_exception(exc): + """Get original exception from exception pickled using + :meth:`get_pickleable_exception`.""" + if isinstance(exc, UnpickleableExceptionWrapper): + return exc.restore() + return exc diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/sysinfo.py b/thesisenv/lib/python3.6/site-packages/celery/utils/sysinfo.py new file mode 100644 index 0000000..65073a6 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/utils/sysinfo.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +import os + +from math import ceil + +from kombu.utils import cached_property + +__all__ = ['load_average', 'df'] + + +if hasattr(os, 'getloadavg'): + + def load_average(): + return tuple(ceil(l * 1e2) / 1e2 for l in os.getloadavg()) + +else: # pragma: no cover + # Windows doesn't have getloadavg + def load_average(): # noqa + return (0.0, 0.0, 0.0) + + +class df(object): + + def __init__(self, path): + self.path = path + + @property + def 
total_blocks(self): + return self.stat.f_blocks * self.stat.f_frsize / 1024 + + @property + def available(self): + return self.stat.f_bavail * self.stat.f_frsize / 1024 + + @property + def capacity(self): + avail = self.stat.f_bavail + used = self.stat.f_blocks - self.stat.f_bfree + return int(ceil(used * 100.0 / (used + avail) + 0.5)) + + @cached_property + def stat(self): + return os.statvfs(os.path.abspath(self.path)) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/term.py b/thesisenv/lib/python3.6/site-packages/celery/utils/term.py new file mode 100644 index 0000000..430c695 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/utils/term.py @@ -0,0 +1,162 @@ +# -*- coding: utf-8 -*- +""" + celery.utils.term + ~~~~~~~~~~~~~~~~~ + + Terminals and colors. + +""" +from __future__ import absolute_import, unicode_literals + +import platform + +from functools import reduce + +from kombu.utils.encoding import safe_str +from celery.five import string + +__all__ = ['colored'] + +IS_WINDOWS = platform.system() == 'Windows' + +BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8) +OP_SEQ = '\033[%dm' +RESET_SEQ = '\033[0m' +COLOR_SEQ = '\033[1;%dm' + + +def fg(s): + return COLOR_SEQ % s + + +class colored(object): + """Terminal colored text. + + Example:: + >>> c = colored(enabled=True) + >>> print(str(c.red('the quick '), c.blue('brown ', c.bold('fox ')), + ... c.magenta(c.underline('jumps over')), + ... c.yellow(' the lazy '), + ... c.green('dog '))) + + """ + + def __init__(self, *s, **kwargs): + self.s = s + self.enabled = not IS_WINDOWS and kwargs.get('enabled', True) + self.op = kwargs.get('op', '') + self.names = {'black': self.black, + 'red': self.red, + 'green': self.green, + 'yellow': self.yellow, + 'blue': self.blue, + 'magenta': self.magenta, + 'cyan': self.cyan, + 'white': self.white} + + def _add(self, a, b): + return string(a) + string(b) + + def _fold_no_color(self, a, b): + try: + A = a.no_color() + except AttributeError: + A = string(a) + try: + B = b.no_color() + except AttributeError: + B = string(b) + + return ''.join((string(A), string(B))) + + def no_color(self): + if self.s: + return string(reduce(self._fold_no_color, self.s)) + return '' + + def embed(self): + prefix = '' + if self.enabled: + prefix = self.op + return ''.join((string(prefix), string(reduce(self._add, self.s)))) + + def __unicode__(self): + suffix = '' + if self.enabled: + suffix = RESET_SEQ + return string(''.join((self.embed(), string(suffix)))) + + def __str__(self): + return safe_str(self.__unicode__()) + + def node(self, s, op): + return self.__class__(enabled=self.enabled, op=op, *s) + + def black(self, *s): + return self.node(s, fg(30 + BLACK)) + + def red(self, *s): + return self.node(s, fg(30 + RED)) + + def green(self, *s): + return self.node(s, fg(30 + GREEN)) + + def yellow(self, *s): + return self.node(s, fg(30 + YELLOW)) + + def blue(self, *s): + return self.node(s, fg(30 + BLUE)) + + def magenta(self, *s): + return self.node(s, fg(30 + MAGENTA)) + + def cyan(self, *s): + return self.node(s, fg(30 + CYAN)) + + def white(self, *s): + return self.node(s, fg(30 + WHITE)) + + def __repr__(self): + return repr(self.no_color()) + + def bold(self, *s): + return self.node(s, OP_SEQ % 1) + + def underline(self, *s): + return self.node(s, OP_SEQ % 4) + + def blink(self, *s): + return self.node(s, OP_SEQ % 5) + + def reverse(self, *s): + return self.node(s, OP_SEQ % 7) + + def bright(self, *s): + return self.node(s, OP_SEQ % 8) + + def ired(self, *s): + return 
self.node(s, fg(40 + RED)) + + def igreen(self, *s): + return self.node(s, fg(40 + GREEN)) + + def iyellow(self, *s): + return self.node(s, fg(40 + YELLOW)) + + def iblue(self, *s): + return self.node(s, fg(40 + BLUE)) + + def imagenta(self, *s): + return self.node(s, fg(40 + MAGENTA)) + + def icyan(self, *s): + return self.node(s, fg(40 + CYAN)) + + def iwhite(self, *s): + return self.node(s, fg(40 + WHITE)) + + def reset(self, *s): + return self.node(s or [''], RESET_SEQ) + + def __add__(self, other): + return string(self) + string(other) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/text.py b/thesisenv/lib/python3.6/site-packages/celery/utils/text.py new file mode 100644 index 0000000..ffd2d72 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/utils/text.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- +""" + celery.utils.text + ~~~~~~~~~~~~~~~~~ + + Text formatting utilities + +""" +from __future__ import absolute_import + +from textwrap import fill + +from pprint import pformat + +__all__ = ['dedent_initial', 'dedent', 'fill_paragraphs', 'join', + 'ensure_2lines', 'abbr', 'abbrtask', 'indent', 'truncate', + 'pluralize', 'pretty'] + + +def dedent_initial(s, n=4): + return s[n:] if s[:n] == ' ' * n else s + + +def dedent(s, n=4, sep='\n'): + return sep.join(dedent_initial(l) for l in s.splitlines()) + + +def fill_paragraphs(s, width, sep='\n'): + return sep.join(fill(p, width) for p in s.split(sep)) + + +def join(l, sep='\n'): + return sep.join(v for v in l if v) + + +def ensure_2lines(s, sep='\n'): + if len(s.splitlines()) <= 2: + return s + sep + return s + + +def abbr(S, max, ellipsis='...'): + if S is None: + return '???' + if len(S) > max: + return ellipsis and (S[:max - len(ellipsis)] + ellipsis) or S[:max] + return S + + +def abbrtask(S, max): + if S is None: + return '???' + if len(S) > max: + module, _, cls = S.rpartition('.') + module = abbr(module, max - len(cls) - 3, False) + return module + '[.]' + cls + return S + + +def indent(t, indent=0, sep='\n'): + """Indent text.""" + return sep.join(' ' * indent + p for p in t.split(sep)) + + +def truncate(text, maxlen=128, suffix='...'): + """Truncates text to a maximum number of characters.""" + if len(text) >= maxlen: + return text[:maxlen].rsplit(' ', 1)[0] + suffix + return text + + +def pluralize(n, text, suffix='s'): + if n > 1: + return text + suffix + return text + + +def pretty(value, width=80, nl_width=80, sep='\n', **kw): + if isinstance(value, dict): + return '{{{0} {1}'.format(sep, pformat(value, 4, nl_width)[1:]) + elif isinstance(value, tuple): + return '{0}{1}{2}'.format( + sep, ' ' * 4, pformat(value, width=nl_width, **kw), + ) + else: + return pformat(value, width=width, **kw) diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/threads.py b/thesisenv/lib/python3.6/site-packages/celery/utils/threads.py new file mode 100644 index 0000000..5d42373 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/utils/threads.py @@ -0,0 +1,329 @@ +# -*- coding: utf-8 -*- +""" + celery.utils.threads + ~~~~~~~~~~~~~~~~~~~~ + + Threading utilities. 
+ +""" +from __future__ import absolute_import, print_function + +import os +import socket +import sys +import threading +import traceback + +from contextlib import contextmanager + +from celery.local import Proxy +from celery.five import THREAD_TIMEOUT_MAX, items + +__all__ = ['bgThread', 'Local', 'LocalStack', 'LocalManager', + 'get_ident', 'default_socket_timeout'] + +USE_FAST_LOCALS = os.environ.get('USE_FAST_LOCALS') +PY3 = sys.version_info[0] == 3 + + +@contextmanager +def default_socket_timeout(timeout): + prev = socket.getdefaulttimeout() + socket.setdefaulttimeout(timeout) + yield + socket.setdefaulttimeout(prev) + + +class bgThread(threading.Thread): + + def __init__(self, name=None, **kwargs): + super(bgThread, self).__init__() + self._is_shutdown = threading.Event() + self._is_stopped = threading.Event() + self.daemon = True + self.name = name or self.__class__.__name__ + + def body(self): + raise NotImplementedError('subclass responsibility') + + def on_crash(self, msg, *fmt, **kwargs): + print(msg.format(*fmt), file=sys.stderr) + exc_info = sys.exc_info() + try: + traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], + None, sys.stderr) + finally: + del(exc_info) + + def run(self): + body = self.body + shutdown_set = self._is_shutdown.is_set + try: + while not shutdown_set(): + try: + body() + except Exception as exc: + try: + self.on_crash('{0!r} crashed: {1!r}', self.name, exc) + self._set_stopped() + finally: + os._exit(1) # exiting by normal means won't work + finally: + self._set_stopped() + + def _set_stopped(self): + try: + self._is_stopped.set() + except TypeError: # pragma: no cover + # we lost the race at interpreter shutdown, + # so gc collected built-in modules. + pass + + def stop(self): + """Graceful shutdown.""" + self._is_shutdown.set() + self._is_stopped.wait() + if self.is_alive(): + self.join(THREAD_TIMEOUT_MAX) + +try: + from greenlet import getcurrent as get_ident +except ImportError: # pragma: no cover + try: + from _thread import get_ident # noqa + except ImportError: + try: + from thread import get_ident # noqa + except ImportError: # pragma: no cover + try: + from _dummy_thread import get_ident # noqa + except ImportError: + from dummy_thread import get_ident # noqa + + +def release_local(local): + """Releases the contents of the local for the current context. + This makes it possible to use locals without a manager. + + Example:: + + >>> loc = Local() + >>> loc.foo = 42 + >>> release_local(loc) + >>> hasattr(loc, 'foo') + False + + With this function one can release :class:`Local` objects as well + as :class:`StackLocal` objects. However it is not possible to + release data held by proxies that way, one always has to retain + a reference to the underlying local object in order to be able + to release it. + + .. 
versionadded:: 0.6.1 + """ + local.__release_local__() + + +class Local(object): + __slots__ = ('__storage__', '__ident_func__') + + def __init__(self): + object.__setattr__(self, '__storage__', {}) + object.__setattr__(self, '__ident_func__', get_ident) + + def __iter__(self): + return iter(items(self.__storage__)) + + def __call__(self, proxy): + """Create a proxy for a name.""" + return Proxy(self, proxy) + + def __release_local__(self): + self.__storage__.pop(self.__ident_func__(), None) + + def __getattr__(self, name): + try: + return self.__storage__[self.__ident_func__()][name] + except KeyError: + raise AttributeError(name) + + def __setattr__(self, name, value): + ident = self.__ident_func__() + storage = self.__storage__ + try: + storage[ident][name] = value + except KeyError: + storage[ident] = {name: value} + + def __delattr__(self, name): + try: + del self.__storage__[self.__ident_func__()][name] + except KeyError: + raise AttributeError(name) + + +class _LocalStack(object): + """This class works similar to a :class:`Local` but keeps a stack + of objects instead. This is best explained with an example:: + + >>> ls = LocalStack() + >>> ls.push(42) + >>> ls.top + 42 + >>> ls.push(23) + >>> ls.top + 23 + >>> ls.pop() + 23 + >>> ls.top + 42 + + They can be force released by using a :class:`LocalManager` or with + the :func:`release_local` function but the correct way is to pop the + item from the stack after using. When the stack is empty it will + no longer be bound to the current context (and as such released). + + By calling the stack without arguments it will return a proxy that + resolves to the topmost item on the stack. + + """ + + def __init__(self): + self._local = Local() + + def __release_local__(self): + self._local.__release_local__() + + def _get__ident_func__(self): + return self._local.__ident_func__ + + def _set__ident_func__(self, value): + object.__setattr__(self._local, '__ident_func__', value) + __ident_func__ = property(_get__ident_func__, _set__ident_func__) + del _get__ident_func__, _set__ident_func__ + + def __call__(self): + def _lookup(): + rv = self.top + if rv is None: + raise RuntimeError('object unbound') + return rv + return Proxy(_lookup) + + def push(self, obj): + """Pushes a new item to the stack""" + rv = getattr(self._local, 'stack', None) + if rv is None: + self._local.stack = rv = [] + rv.append(obj) + return rv + + def pop(self): + """Remove the topmost item from the stack, will return the + old value or `None` if the stack was already empty. + """ + stack = getattr(self._local, 'stack', None) + if stack is None: + return None + elif len(stack) == 1: + release_local(self._local) + return stack[-1] + else: + return stack.pop() + + def __len__(self): + stack = getattr(self._local, 'stack', None) + return len(stack) if stack else 0 + + @property + def stack(self): + """get_current_worker_task uses this to find + the original task that was executed by the worker.""" + stack = getattr(self._local, 'stack', None) + if stack is not None: + return stack + return [] + + @property + def top(self): + """The topmost item on the stack. If the stack is empty, + `None` is returned. + """ + try: + return self._local.stack[-1] + except (AttributeError, IndexError): + return None + + +class LocalManager(object): + """Local objects cannot manage themselves. For that you need a local + manager. You can pass a local manager multiple locals or add them + later by appending them to `manager.locals`. 
Everytime the manager + cleans up it, will clean up all the data left in the locals for this + context. + + The `ident_func` parameter can be added to override the default ident + function for the wrapped locals. + + """ + + def __init__(self, locals=None, ident_func=None): + if locals is None: + self.locals = [] + elif isinstance(locals, Local): + self.locals = [locals] + else: + self.locals = list(locals) + if ident_func is not None: + self.ident_func = ident_func + for local in self.locals: + object.__setattr__(local, '__ident_func__', ident_func) + else: + self.ident_func = get_ident + + def get_ident(self): + """Return the context identifier the local objects use internally + for this context. You cannot override this method to change the + behavior but use it to link other context local objects (such as + SQLAlchemy's scoped sessions) to the Werkzeug locals.""" + return self.ident_func() + + def cleanup(self): + """Manually clean up the data in the locals for this context. + + Call this at the end of the request or use `make_middleware()`. + + """ + for local in self.locals: + release_local(local) + + def __repr__(self): + return '<{0} storages: {1}>'.format( + self.__class__.__name__, len(self.locals)) + + +class _FastLocalStack(threading.local): + + def __init__(self): + self.stack = [] + self.push = self.stack.append + self.pop = self.stack.pop + + @property + def top(self): + try: + return self.stack[-1] + except (AttributeError, IndexError): + return None + + def __len__(self): + return len(self.stack) + +if USE_FAST_LOCALS: # pragma: no cover + LocalStack = _FastLocalStack +else: + # - See #706 + # since each thread has its own greenlet we can just use those as + # identifiers for the context. If greenlets are not available we + # fall back to the current thread ident. + LocalStack = _LocalStack # noqa diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/timer2.py b/thesisenv/lib/python3.6/site-packages/celery/utils/timer2.py new file mode 100644 index 0000000..e42660c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/utils/timer2.py @@ -0,0 +1,144 @@ +# -*- coding: utf-8 -*- +""" + timer2 + ~~~~~~ + + Scheduler for Python functions. 
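For orientation, a minimal sketch of the timer2.Timer thread defined in this module; the delays and callbacks are illustrative:

from celery.utils.timer2 import Timer

timer = Timer()
timer.call_after(5.0, lambda: print('runs once, about five seconds from now'))
timer.call_repeatedly(60.0, lambda: print('runs every minute'))
# ... later, on shutdown:
timer.stop()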
+ +""" +from __future__ import absolute_import + +import os +import sys +import threading + +from itertools import count +from time import sleep + +from celery.five import THREAD_TIMEOUT_MAX +from kombu.async.timer import Entry, Timer as Schedule, to_timestamp, logger + +TIMER_DEBUG = os.environ.get('TIMER_DEBUG') + +__all__ = ['Entry', 'Schedule', 'Timer', 'to_timestamp'] + + +class Timer(threading.Thread): + Entry = Entry + Schedule = Schedule + + running = False + on_tick = None + _timer_count = count(1) + + if TIMER_DEBUG: # pragma: no cover + def start(self, *args, **kwargs): + import traceback + print('- Timer starting') + traceback.print_stack() + super(Timer, self).start(*args, **kwargs) + + def __init__(self, schedule=None, on_error=None, on_tick=None, + on_start=None, max_interval=None, **kwargs): + self.schedule = schedule or self.Schedule(on_error=on_error, + max_interval=max_interval) + self.on_start = on_start + self.on_tick = on_tick or self.on_tick + threading.Thread.__init__(self) + self._is_shutdown = threading.Event() + self._is_stopped = threading.Event() + self.mutex = threading.Lock() + self.not_empty = threading.Condition(self.mutex) + self.daemon = True + self.name = 'Timer-{0}'.format(next(self._timer_count)) + + def _next_entry(self): + with self.not_empty: + delay, entry = next(self.scheduler) + if entry is None: + if delay is None: + self.not_empty.wait(1.0) + return delay + return self.schedule.apply_entry(entry) + __next__ = next = _next_entry # for 2to3 + + def run(self): + try: + self.running = True + self.scheduler = iter(self.schedule) + + while not self._is_shutdown.isSet(): + delay = self._next_entry() + if delay: + if self.on_tick: + self.on_tick(delay) + if sleep is None: # pragma: no cover + break + sleep(delay) + try: + self._is_stopped.set() + except TypeError: # pragma: no cover + # we lost the race at interpreter shutdown, + # so gc collected built-in modules. 
+ pass + except Exception as exc: + logger.error('Thread Timer crashed: %r', exc, exc_info=True) + os._exit(1) + + def stop(self): + self._is_shutdown.set() + if self.running: + self._is_stopped.wait() + self.join(THREAD_TIMEOUT_MAX) + self.running = False + + def ensure_started(self): + if not self.running and not self.isAlive(): + if self.on_start: + self.on_start(self) + self.start() + + def _do_enter(self, meth, *args, **kwargs): + self.ensure_started() + with self.mutex: + entry = getattr(self.schedule, meth)(*args, **kwargs) + self.not_empty.notify() + return entry + + def enter(self, entry, eta, priority=None): + return self._do_enter('enter_at', entry, eta, priority=priority) + + def call_at(self, *args, **kwargs): + return self._do_enter('call_at', *args, **kwargs) + + def enter_after(self, *args, **kwargs): + return self._do_enter('enter_after', *args, **kwargs) + + def call_after(self, *args, **kwargs): + return self._do_enter('call_after', *args, **kwargs) + + def call_repeatedly(self, *args, **kwargs): + return self._do_enter('call_repeatedly', *args, **kwargs) + + def exit_after(self, secs, priority=10): + self.call_after(secs, sys.exit, priority) + + def cancel(self, tref): + tref.cancel() + + def clear(self): + self.schedule.clear() + + def empty(self): + return not len(self) + + def __len__(self): + return len(self.schedule) + + def __bool__(self): + return True + __nonzero__ = __bool__ + + @property + def queue(self): + return self.schedule.queue diff --git a/thesisenv/lib/python3.6/site-packages/celery/utils/timeutils.py b/thesisenv/lib/python3.6/site-packages/celery/utils/timeutils.py new file mode 100644 index 0000000..6dab703 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/utils/timeutils.py @@ -0,0 +1,370 @@ +# -*- coding: utf-8 -*- +""" + celery.utils.timeutils + ~~~~~~~~~~~~~~~~~~~~~~ + + This module contains various utilities related to dates and times. + +""" +from __future__ import absolute_import + +import numbers +import os +import sys +import time as _time + +from calendar import monthrange +from datetime import date, datetime, timedelta, tzinfo + +from kombu.utils import cached_property, reprcall +from kombu.utils.compat import timedelta_seconds + +from pytz import timezone as _timezone, AmbiguousTimeError, FixedOffset + +from celery.five import string_t + +from .functional import dictfilter +from .iso8601 import parse_iso8601 +from .text import pluralize + +__all__ = ['LocalTimezone', 'timezone', 'maybe_timedelta', 'timedelta_seconds', + 'delta_resolution', 'remaining', 'rate', 'weekday', + 'humanize_seconds', 'maybe_iso8601', 'is_naive', 'make_aware', + 'localize', 'to_utc', 'maybe_make_aware', 'ffwd', 'utcoffset', + 'adjust_timestamp', 'maybe_s_to_ms'] + +PY3 = sys.version_info[0] == 3 +PY33 = sys.version_info >= (3, 3) + +C_REMDEBUG = os.environ.get('C_REMDEBUG', False) + +DAYNAMES = 'sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat' +WEEKDAYS = dict(zip(DAYNAMES, range(7))) + +RATE_MODIFIER_MAP = {'s': lambda n: n, + 'm': lambda n: n / 60.0, + 'h': lambda n: n / 60.0 / 60.0} + +TIME_UNITS = (('day', 60 * 60 * 24.0, lambda n: format(n, '.2f')), + ('hour', 60 * 60.0, lambda n: format(n, '.2f')), + ('minute', 60.0, lambda n: format(n, '.2f')), + ('second', 1.0, lambda n: format(n, '.2f'))) + +ZERO = timedelta(0) + +_local_timezone = None + + +class LocalTimezone(tzinfo): + """Local time implementation taken from Python's docs. + + Used only when UTC is not enabled. 
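+    Offsets are taken from the platform :mod:`time` module
+    (``time.timezone``/``time.altzone``) and DST is detected
+    per datetime via :func:`time.mktime`.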
+ """ + _offset_cache = {} + + def __init__(self): + # This code is moved in __init__ to execute it as late as possible + # See get_default_timezone(). + self.STDOFFSET = timedelta(seconds=-_time.timezone) + if _time.daylight: + self.DSTOFFSET = timedelta(seconds=-_time.altzone) + else: + self.DSTOFFSET = self.STDOFFSET + self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET + tzinfo.__init__(self) + + def __repr__(self): + return ''.format( + int(timedelta_seconds(self.DSTOFFSET) / 3600), + ) + + def utcoffset(self, dt): + return self.DSTOFFSET if self._isdst(dt) else self.STDOFFSET + + def dst(self, dt): + return self.DSTDIFF if self._isdst(dt) else ZERO + + def tzname(self, dt): + return _time.tzname[self._isdst(dt)] + + if PY3: + + def fromutc(self, dt): + # The base tzinfo class no longer implements a DST + # offset aware .fromutc() in Python3 (Issue #2306). + + # I'd rather rely on pytz to do this, than port + # the C code from cpython's fromutc [asksol] + offset = int(self.utcoffset(dt).seconds / 60.0) + try: + tz = self._offset_cache[offset] + except KeyError: + tz = self._offset_cache[offset] = FixedOffset(offset) + return tz.fromutc(dt.replace(tzinfo=tz)) + + def _isdst(self, dt): + tt = (dt.year, dt.month, dt.day, + dt.hour, dt.minute, dt.second, + dt.weekday(), 0, 0) + stamp = _time.mktime(tt) + tt = _time.localtime(stamp) + return tt.tm_isdst > 0 + + +class _Zone(object): + + def tz_or_local(self, tzinfo=None): + if tzinfo is None: + return self.local + return self.get_timezone(tzinfo) + + def to_local(self, dt, local=None, orig=None): + if is_naive(dt): + dt = make_aware(dt, orig or self.utc) + return localize(dt, self.tz_or_local(local)) + + if PY33: + + def to_system(self, dt): + # tz=None is a special case since Python 3.3, and will + # convert to the current local timezone (Issue #2306). + return dt.astimezone(tz=None) + + else: + + def to_system(self, dt): # noqa + return localize(dt, self.local) + + def to_local_fallback(self, dt): + if is_naive(dt): + return make_aware(dt, self.local) + return localize(dt, self.local) + + def get_timezone(self, zone): + if isinstance(zone, string_t): + return _timezone(zone) + return zone + + @cached_property + def local(self): + return LocalTimezone() + + @cached_property + def utc(self): + return self.get_timezone('UTC') +timezone = _Zone() + + +def maybe_timedelta(delta): + """Coerces integer to timedelta if `delta` is an integer.""" + if isinstance(delta, numbers.Real): + return timedelta(seconds=delta) + return delta + + +def delta_resolution(dt, delta): + """Round a datetime to the resolution of a timedelta. + + If the timedelta is in days, the datetime will be rounded + to the nearest days, if the timedelta is in hours the datetime + will be rounded to the nearest hour, and so on until seconds + which will just return the original datetime. + + """ + delta = timedelta_seconds(delta) + + resolutions = ((3, lambda x: x / 86400), + (4, lambda x: x / 3600), + (5, lambda x: x / 60)) + + args = dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second + for res, predicate in resolutions: + if predicate(delta) >= 1.0: + return datetime(*args[:res], tzinfo=dt.tzinfo) + return dt + + +def remaining(start, ends_in, now=None, relative=False): + """Calculate the remaining time for a start date and a timedelta. + + e.g. "how many seconds left for 30 seconds after start?" + + :param start: Start :class:`~datetime.datetime`. + :param ends_in: The end delta as a :class:`~datetime.timedelta`. 
+ :keyword relative: If enabled the end time will be + calculated using :func:`delta_resolution` (i.e. rounded to the + resolution of `ends_in`). + :keyword now: Function returning the current time and date, + defaults to :func:`datetime.utcnow`. + + """ + now = now or datetime.utcnow() + end_date = start + ends_in + if relative: + end_date = delta_resolution(end_date, ends_in) + ret = end_date - now + if C_REMDEBUG: # pragma: no cover + print('rem: NOW:%r START:%r ENDS_IN:%r END_DATE:%s REM:%s' % ( + now, start, ends_in, end_date, ret)) + return ret + + +def rate(rate): + """Parse rate strings, such as `"100/m"`, `"2/h"` or `"0.5/s"` + and convert them to seconds.""" + if rate: + if isinstance(rate, string_t): + ops, _, modifier = rate.partition('/') + return RATE_MODIFIER_MAP[modifier or 's'](float(ops)) or 0 + return rate or 0 + return 0 + + +def weekday(name): + """Return the position of a weekday (0 - 7, where 0 is Sunday). + + Example:: + + >>> weekday('sunday'), weekday('sun'), weekday('mon') + (0, 0, 1) + + """ + abbreviation = name[0:3].lower() + try: + return WEEKDAYS[abbreviation] + except KeyError: + # Show original day name in exception, instead of abbr. + raise KeyError(name) + + +def humanize_seconds(secs, prefix='', sep='', now='now'): + """Show seconds in human form, e.g. 60 is "1 minute", 7200 is "2 + hours". + + :keyword prefix: Can be used to add a preposition to the output, + e.g. 'in' will give 'in 1 second', but add nothing to 'now'. + + """ + secs = float(secs) + for unit, divider, formatter in TIME_UNITS: + if secs >= divider: + w = secs / divider + return '{0}{1}{2} {3}'.format(prefix, sep, formatter(w), + pluralize(w, unit)) + return now + + +def maybe_iso8601(dt): + """`Either datetime | str -> datetime or None -> None`""" + if not dt: + return + if isinstance(dt, datetime): + return dt + return parse_iso8601(dt) + + +def is_naive(dt): + """Return :const:`True` if the datetime is naive + (does not have timezone information).""" + return dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None + + +def make_aware(dt, tz): + """Sets the timezone for a datetime object.""" + try: + _localize = tz.localize + except AttributeError: + return dt.replace(tzinfo=tz) + else: + # works on pytz timezones + try: + return _localize(dt, is_dst=None) + except AmbiguousTimeError: + return min(_localize(dt, is_dst=True), + _localize(dt, is_dst=False)) + + +def localize(dt, tz): + """Convert aware datetime to another timezone.""" + dt = dt.astimezone(tz) + try: + _normalize = tz.normalize + except AttributeError: # non-pytz tz + return dt + else: + try: + return _normalize(dt, is_dst=None) + except TypeError: + return _normalize(dt) + except AmbiguousTimeError: + return min(_normalize(dt, is_dst=True), + _normalize(dt, is_dst=False)) + + +def to_utc(dt): + """Converts naive datetime to UTC""" + return make_aware(dt, timezone.utc) + + +def maybe_make_aware(dt, tz=None): + if is_naive(dt): + dt = to_utc(dt) + return localize( + dt, timezone.utc if tz is None else timezone.tz_or_local(tz), + ) + + +class ffwd(object): + """Version of relativedelta that only supports addition.""" + + def __init__(self, year=None, month=None, weeks=0, weekday=None, day=None, + hour=None, minute=None, second=None, microsecond=None, + **kwargs): + self.year = year + self.month = month + self.weeks = weeks + self.weekday = weekday + self.day = day + self.hour = hour + self.minute = minute + self.second = second + self.microsecond = microsecond + self.days = weeks * 7 + self._has_time = self.hour is not None or 
self.minute is not None + + def __repr__(self): + return reprcall('ffwd', (), self._fields(weeks=self.weeks, + weekday=self.weekday)) + + def __radd__(self, other): + if not isinstance(other, date): + return NotImplemented + year = self.year or other.year + month = self.month or other.month + day = min(monthrange(year, month)[1], self.day or other.day) + ret = other.replace(**dict(dictfilter(self._fields()), + year=year, month=month, day=day)) + if self.weekday is not None: + ret += timedelta(days=(7 - ret.weekday() + self.weekday) % 7) + return ret + timedelta(days=self.days) + + def _fields(self, **extra): + return dictfilter({ + 'year': self.year, 'month': self.month, 'day': self.day, + 'hour': self.hour, 'minute': self.minute, + 'second': self.second, 'microsecond': self.microsecond, + }, **extra) + + +def utcoffset(time=_time, localtime=_time.localtime): + if localtime().tm_isdst: + return time.altzone // 3600 + return time.timezone // 3600 + + +def adjust_timestamp(ts, offset, here=utcoffset): + return ts - (offset - here()) * 3600 + + +def maybe_s_to_ms(v): + return int(float(v) * 1000.0) if v is not None else v diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/__init__.py b/thesisenv/lib/python3.6/site-packages/celery/worker/__init__.py new file mode 100644 index 0000000..3d65dd1 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/worker/__init__.py @@ -0,0 +1,393 @@ +# -*- coding: utf-8 -*- +""" + celery.worker + ~~~~~~~~~~~~~ + + :class:`WorkController` can be used to instantiate in-process workers. + + The worker consists of several components, all managed by bootsteps + (mod:`celery.bootsteps`). + +""" +from __future__ import absolute_import + +import os +import sys +import traceback +try: + import resource +except ImportError: # pragma: no cover + resource = None # noqa + +from billiard import cpu_count +from billiard.util import Finalize +from kombu.syn import detect_environment + +from celery import bootsteps +from celery.bootsteps import RUN, TERMINATE +from celery import concurrency as _concurrency +from celery import platforms +from celery import signals +from celery.exceptions import ( + ImproperlyConfigured, WorkerTerminate, TaskRevokedError, +) +from celery.five import string_t, values +from celery.utils import default_nodename, worker_direct +from celery.utils.imports import reload_from_cwd +from celery.utils.log import mlevel, worker_logger as logger +from celery.utils.threads import default_socket_timeout + +from . import state + +__all__ = ['WorkController', 'default_nodename'] + +#: Default socket timeout at shutdown. +SHUTDOWN_SOCKET_TIMEOUT = 5.0 + +SELECT_UNKNOWN_QUEUE = """\ +Trying to select queue subset of {0!r}, but queue {1} is not +defined in the CELERY_QUEUES setting. + +If you want to automatically declare unknown queues you can +enable the CELERY_CREATE_MISSING_QUEUES setting. +""" + +DESELECT_UNKNOWN_QUEUE = """\ +Trying to deselect queue subset of {0!r}, but queue {1} is not +defined in the CELERY_QUEUES setting. 
+""" + + +def str_to_list(s): + if isinstance(s, string_t): + return s.split(',') + return s + + +class WorkController(object): + """Unmanaged worker instance.""" + app = None + + pidlock = None + blueprint = None + pool = None + semaphore = None + + class Blueprint(bootsteps.Blueprint): + """Worker bootstep blueprint.""" + name = 'Worker' + default_steps = set([ + 'celery.worker.components:Hub', + 'celery.worker.components:Queues', + 'celery.worker.components:Pool', + 'celery.worker.components:Beat', + 'celery.worker.components:Timer', + 'celery.worker.components:StateDB', + 'celery.worker.components:Consumer', + 'celery.worker.autoscale:WorkerComponent', + 'celery.worker.autoreload:WorkerComponent', + + ]) + + def __init__(self, app=None, hostname=None, **kwargs): + self.app = app or self.app + self.hostname = default_nodename(hostname) + self.app.loader.init_worker() + self.on_before_init(**kwargs) + self.setup_defaults(**kwargs) + self.on_after_init(**kwargs) + + self.setup_instance(**self.prepare_args(**kwargs)) + self._finalize = [ + Finalize(self, self._send_worker_shutdown, exitpriority=10), + ] + + def setup_instance(self, queues=None, ready_callback=None, pidfile=None, + include=None, use_eventloop=None, exclude_queues=None, + **kwargs): + self.pidfile = pidfile + self.setup_queues(queues, exclude_queues) + self.setup_includes(str_to_list(include)) + + # Set default concurrency + if not self.concurrency: + try: + self.concurrency = cpu_count() + except NotImplementedError: + self.concurrency = 2 + + # Options + self.loglevel = mlevel(self.loglevel) + self.ready_callback = ready_callback or self.on_consumer_ready + + # this connection is not established, only used for params + self._conninfo = self.app.connection() + self.use_eventloop = ( + self.should_use_eventloop() if use_eventloop is None + else use_eventloop + ) + self.options = kwargs + + signals.worker_init.send(sender=self) + + # Initialize bootsteps + self.pool_cls = _concurrency.get_implementation(self.pool_cls) + self.steps = [] + self.on_init_blueprint() + self.blueprint = self.Blueprint(app=self.app, + on_start=self.on_start, + on_close=self.on_close, + on_stopped=self.on_stopped) + self.blueprint.apply(self, **kwargs) + + def on_init_blueprint(self): + pass + + def on_before_init(self, **kwargs): + pass + + def on_after_init(self, **kwargs): + pass + + def on_start(self): + if self.pidfile: + self.pidlock = platforms.create_pidlock(self.pidfile) + + def on_consumer_ready(self, consumer): + pass + + def on_close(self): + self.app.loader.shutdown_worker() + + def on_stopped(self): + self.timer.stop() + self.consumer.shutdown() + + if self.pidlock: + self.pidlock.release() + + def setup_queues(self, include, exclude=None): + include = str_to_list(include) + exclude = str_to_list(exclude) + try: + self.app.amqp.queues.select(include) + except KeyError as exc: + raise ImproperlyConfigured( + SELECT_UNKNOWN_QUEUE.format(include, exc)) + try: + self.app.amqp.queues.deselect(exclude) + except KeyError as exc: + raise ImproperlyConfigured( + DESELECT_UNKNOWN_QUEUE.format(exclude, exc)) + if self.app.conf.CELERY_WORKER_DIRECT: + self.app.amqp.queues.select_add(worker_direct(self.hostname)) + + def setup_includes(self, includes): + # Update celery_include to have all known task modules, so that we + # ensure all task modules are imported in case an execv happens. 
+ prev = tuple(self.app.conf.CELERY_INCLUDE) + if includes: + prev += tuple(includes) + [self.app.loader.import_task_module(m) for m in includes] + self.include = includes + task_modules = set(task.__class__.__module__ + for task in values(self.app.tasks)) + self.app.conf.CELERY_INCLUDE = tuple(set(prev) | task_modules) + + def prepare_args(self, **kwargs): + return kwargs + + def _send_worker_shutdown(self): + signals.worker_shutdown.send(sender=self) + + def start(self): + """Starts the workers main loop.""" + try: + self.blueprint.start(self) + except WorkerTerminate: + self.terminate() + except Exception as exc: + logger.error('Unrecoverable error: %r', exc, exc_info=True) + self.stop() + except (KeyboardInterrupt, SystemExit): + self.stop() + + def register_with_event_loop(self, hub): + self.blueprint.send_all( + self, 'register_with_event_loop', args=(hub, ), + description='hub.register', + ) + + def _process_task_sem(self, req): + return self._quick_acquire(self._process_task, req) + + def _process_task(self, req): + """Process task by sending it to the pool of workers.""" + try: + req.execute_using_pool(self.pool) + except TaskRevokedError: + try: + self._quick_release() # Issue 877 + except AttributeError: + pass + except Exception as exc: + logger.critical('Internal error: %r\n%s', + exc, traceback.format_exc(), exc_info=True) + + def signal_consumer_close(self): + try: + self.consumer.close() + except AttributeError: + pass + + def should_use_eventloop(self): + return (detect_environment() == 'default' and + self._conninfo.is_evented and not self.app.IS_WINDOWS) + + def stop(self, in_sighandler=False): + """Graceful shutdown of the worker server.""" + if self.blueprint.state == RUN: + self.signal_consumer_close() + if not in_sighandler or self.pool.signal_safe: + self._shutdown(warm=True) + + def terminate(self, in_sighandler=False): + """Not so graceful shutdown of the worker server.""" + if self.blueprint.state != TERMINATE: + self.signal_consumer_close() + if not in_sighandler or self.pool.signal_safe: + self._shutdown(warm=False) + + def _shutdown(self, warm=True): + # if blueprint does not exist it means that we had an + # error before the bootsteps could be initialized. 
+ if self.blueprint is not None: + with default_socket_timeout(SHUTDOWN_SOCKET_TIMEOUT): # Issue 975 + self.blueprint.stop(self, terminate=not warm) + self.blueprint.join() + + def reload(self, modules=None, reload=False, reloader=None): + modules = self.app.loader.task_modules if modules is None else modules + imp = self.app.loader.import_from_cwd + + for module in set(modules or ()): + if module not in sys.modules: + logger.debug('importing module %s', module) + imp(module) + elif reload: + logger.debug('reloading module %s', module) + reload_from_cwd(sys.modules[module], reloader) + + if self.consumer: + self.consumer.update_strategies() + self.consumer.reset_rate_limits() + try: + self.pool.restart() + except NotImplementedError: + pass + + def info(self): + return {'total': self.state.total_count, + 'pid': os.getpid(), + 'clock': str(self.app.clock)} + + def rusage(self): + if resource is None: + raise NotImplementedError('rusage not supported by this platform') + s = resource.getrusage(resource.RUSAGE_SELF) + return { + 'utime': s.ru_utime, + 'stime': s.ru_stime, + 'maxrss': s.ru_maxrss, + 'ixrss': s.ru_ixrss, + 'idrss': s.ru_idrss, + 'isrss': s.ru_isrss, + 'minflt': s.ru_minflt, + 'majflt': s.ru_majflt, + 'nswap': s.ru_nswap, + 'inblock': s.ru_inblock, + 'oublock': s.ru_oublock, + 'msgsnd': s.ru_msgsnd, + 'msgrcv': s.ru_msgrcv, + 'nsignals': s.ru_nsignals, + 'nvcsw': s.ru_nvcsw, + 'nivcsw': s.ru_nivcsw, + } + + def stats(self): + info = self.info() + info.update(self.blueprint.info(self)) + info.update(self.consumer.blueprint.info(self.consumer)) + try: + info['rusage'] = self.rusage() + except NotImplementedError: + info['rusage'] = 'N/A' + return info + + def __repr__(self): + return ''.format( + self=self, + state=(self.blueprint.human_state() + if self.blueprint else 'initializing'), # Issue #2514 + ) + + def __str__(self): + return self.hostname + + @property + def state(self): + return state + + def setup_defaults(self, concurrency=None, loglevel=None, logfile=None, + send_events=None, pool_cls=None, consumer_cls=None, + timer_cls=None, timer_precision=None, + autoscaler_cls=None, autoreloader_cls=None, + pool_putlocks=None, pool_restarts=None, + force_execv=None, state_db=None, + schedule_filename=None, scheduler_cls=None, + task_time_limit=None, task_soft_time_limit=None, + max_tasks_per_child=None, prefetch_multiplier=None, + disable_rate_limits=None, worker_lost_wait=None, **_kw): + self.concurrency = self._getopt('concurrency', concurrency) + self.loglevel = self._getopt('log_level', loglevel) + self.logfile = self._getopt('log_file', logfile) + self.send_events = self._getopt('send_events', send_events) + self.pool_cls = self._getopt('pool', pool_cls) + self.consumer_cls = self._getopt('consumer', consumer_cls) + self.timer_cls = self._getopt('timer', timer_cls) + self.timer_precision = self._getopt('timer_precision', timer_precision) + self.autoscaler_cls = self._getopt('autoscaler', autoscaler_cls) + self.autoreloader_cls = self._getopt('autoreloader', autoreloader_cls) + self.pool_putlocks = self._getopt('pool_putlocks', pool_putlocks) + self.pool_restarts = self._getopt('pool_restarts', pool_restarts) + self.force_execv = self._getopt('force_execv', force_execv) + self.state_db = self._getopt('state_db', state_db) + self.schedule_filename = self._getopt( + 'schedule_filename', schedule_filename, + ) + self.scheduler_cls = self._getopt( + 'celerybeat_scheduler', scheduler_cls, + ) + self.task_time_limit = self._getopt( + 'task_time_limit', task_time_limit, + ) + 
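+        # The remaining options resolve the same way: an explicit argument
+        # wins, otherwise _getopt() looks the key up in the app
+        # configuration (namespace 'celeryd').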
self.task_soft_time_limit = self._getopt( + 'task_soft_time_limit', task_soft_time_limit, + ) + self.max_tasks_per_child = self._getopt( + 'max_tasks_per_child', max_tasks_per_child, + ) + self.prefetch_multiplier = int(self._getopt( + 'prefetch_multiplier', prefetch_multiplier, + )) + self.disable_rate_limits = self._getopt( + 'disable_rate_limits', disable_rate_limits, + ) + self.worker_lost_wait = self._getopt( + 'worker_lost_wait', worker_lost_wait, + ) + + def _getopt(self, key, value): + if value is not None: + return value + return self.app.conf.find_value_for_key(key, namespace='celeryd') diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/autoreload.py b/thesisenv/lib/python3.6/site-packages/celery/worker/autoreload.py new file mode 100644 index 0000000..8ade32f --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/worker/autoreload.py @@ -0,0 +1,302 @@ +# -*- coding: utf-8 -*- +""" + celery.worker.autoreload + ~~~~~~~~~~~~~~~~~~~~~~~~ + + This module implements automatic module reloading +""" +from __future__ import absolute_import + +import hashlib +import os +import select +import sys +import time + +from collections import defaultdict +from threading import Event + +from kombu.utils import eventio +from kombu.utils.encoding import ensure_bytes + +from celery import bootsteps +from celery.five import items +from celery.platforms import ignore_errno +from celery.utils.imports import module_file +from celery.utils.log import get_logger +from celery.utils.threads import bgThread + +from .components import Pool + +try: # pragma: no cover + import pyinotify + _ProcessEvent = pyinotify.ProcessEvent +except ImportError: # pragma: no cover + pyinotify = None # noqa + _ProcessEvent = object # noqa + +__all__ = [ + 'WorkerComponent', 'Autoreloader', 'Monitor', 'BaseMonitor', + 'StatMonitor', 'KQueueMonitor', 'InotifyMonitor', 'file_hash', +] + +logger = get_logger(__name__) + + +class WorkerComponent(bootsteps.StartStopStep): + label = 'Autoreloader' + conditional = True + requires = (Pool, ) + + def __init__(self, w, autoreload=None, **kwargs): + self.enabled = w.autoreload = autoreload + w.autoreloader = None + + def create(self, w): + w.autoreloader = self.instantiate(w.autoreloader_cls, w) + return w.autoreloader if not w.use_eventloop else None + + def register_with_event_loop(self, w, hub): + w.autoreloader.register_with_event_loop(hub) + hub.on_close.add(w.autoreloader.on_event_loop_close) + + +def file_hash(filename, algorithm='md5'): + hobj = hashlib.new(algorithm) + with open(filename, 'rb') as f: + for chunk in iter(lambda: f.read(2 ** 20), ''): + hobj.update(ensure_bytes(chunk)) + return hobj.digest() + + +class BaseMonitor(object): + + def __init__(self, files, + on_change=None, shutdown_event=None, interval=0.5): + self.files = files + self.interval = interval + self._on_change = on_change + self.modify_times = defaultdict(int) + self.shutdown_event = shutdown_event or Event() + + def start(self): + raise NotImplementedError('Subclass responsibility') + + def stop(self): + pass + + def on_change(self, modified): + if self._on_change: + return self._on_change(modified) + + def on_event_loop_close(self, hub): + pass + + +class StatMonitor(BaseMonitor): + """File change monitor based on the ``stat`` system call.""" + + def _mtimes(self): + return ((f, self._mtime(f)) for f in self.files) + + def _maybe_modified(self, f, mt): + return mt is not None and self.modify_times[f] != mt + + def register_with_event_loop(self, hub): + hub.call_repeatedly(2.0, 
self.find_changes) + + def find_changes(self): + maybe_modified = self._maybe_modified + modified = dict((f, mt) for f, mt in self._mtimes() + if maybe_modified(f, mt)) + if modified: + self.on_change(modified) + self.modify_times.update(modified) + + def start(self): + while not self.shutdown_event.is_set(): + self.find_changes() + time.sleep(self.interval) + + @staticmethod + def _mtime(path): + try: + return os.stat(path).st_mtime + except Exception: + pass + + +class KQueueMonitor(BaseMonitor): + """File change monitor based on BSD kernel event notifications""" + + def __init__(self, *args, **kwargs): + super(KQueueMonitor, self).__init__(*args, **kwargs) + self.filemap = dict((f, None) for f in self.files) + self.fdmap = {} + + def register_with_event_loop(self, hub): + if eventio.kqueue is not None: + self._kq = eventio._kqueue() + self.add_events(self._kq) + self._kq.on_file_change = self.handle_event + hub.add_reader(self._kq._kqueue, self._kq.poll, 0) + + def on_event_loop_close(self, hub): + self.close(self._kq) + + def add_events(self, poller): + for f in self.filemap: + self.filemap[f] = fd = os.open(f, os.O_RDONLY) + self.fdmap[fd] = f + poller.watch_file(fd) + + def handle_event(self, events): + self.on_change([self.fdmap[e.ident] for e in events]) + + def start(self): + self.poller = eventio.poll() + self.add_events(self.poller) + self.poller.on_file_change = self.handle_event + while not self.shutdown_event.is_set(): + self.poller.poll(1) + + def close(self, poller): + for f, fd in items(self.filemap): + if fd is not None: + poller.unregister(fd) + with ignore_errno('EBADF'): # pragma: no cover + os.close(fd) + self.filemap.clear() + self.fdmap.clear() + + def stop(self): + self.close(self.poller) + self.poller.close() + + +class InotifyMonitor(_ProcessEvent): + """File change monitor based on Linux kernel `inotify` subsystem""" + + def __init__(self, modules, on_change=None, **kwargs): + assert pyinotify + self._modules = modules + self._on_change = on_change + self._wm = None + self._notifier = None + + def register_with_event_loop(self, hub): + self.create_notifier() + hub.add_reader(self._wm.get_fd(), self.on_readable) + + def on_event_loop_close(self, hub): + pass + + def on_readable(self): + self._notifier.read_events() + self._notifier.process_events() + + def create_notifier(self): + self._wm = pyinotify.WatchManager() + self._notifier = pyinotify.Notifier(self._wm, self) + add_watch = self._wm.add_watch + flags = pyinotify.IN_MODIFY | pyinotify.IN_ATTRIB + for m in self._modules: + add_watch(m, flags) + + def start(self): + try: + self.create_notifier() + self._notifier.loop() + finally: + if self._wm: + self._wm.close() + # Notifier.close is called at the end of Notifier.loop + self._wm = self._notifier = None + + def stop(self): + pass + + def process_(self, event): + self.on_change([event.path]) + + process_IN_ATTRIB = process_IN_MODIFY = process_ + + def on_change(self, modified): + if self._on_change: + return self._on_change(modified) + + +def default_implementation(): + if hasattr(select, 'kqueue') and eventio.kqueue is not None: + return 'kqueue' + elif sys.platform.startswith('linux') and pyinotify: + return 'inotify' + else: + return 'stat' + +implementations = {'kqueue': KQueueMonitor, + 'inotify': InotifyMonitor, + 'stat': StatMonitor} +Monitor = implementations[ + os.environ.get('CELERYD_FSNOTIFY') or default_implementation()] + + +class Autoreloader(bgThread): + """Tracks changes in modules and fires reload commands""" + Monitor = Monitor + + def 
__init__(self, controller, modules=None, monitor_cls=None, **options): + super(Autoreloader, self).__init__() + self.controller = controller + app = self.controller.app + self.modules = app.loader.task_modules if modules is None else modules + self.options = options + self._monitor = None + self._hashes = None + self.file_to_module = {} + + def on_init(self): + files = self.file_to_module + files.update(dict( + (module_file(sys.modules[m]), m) for m in self.modules)) + + self._monitor = self.Monitor( + files, self.on_change, + shutdown_event=self._is_shutdown, **self.options) + self._hashes = dict([(f, file_hash(f)) for f in files]) + + def register_with_event_loop(self, hub): + if self._monitor is None: + self.on_init() + self._monitor.register_with_event_loop(hub) + + def on_event_loop_close(self, hub): + if self._monitor is not None: + self._monitor.on_event_loop_close(hub) + + def body(self): + self.on_init() + with ignore_errno('EINTR', 'EAGAIN'): + self._monitor.start() + + def _maybe_modified(self, f): + if os.path.exists(f): + digest = file_hash(f) + if digest != self._hashes[f]: + self._hashes[f] = digest + return True + return False + + def on_change(self, files): + modified = [f for f in files if self._maybe_modified(f)] + if modified: + names = [self.file_to_module[module] for module in modified] + logger.info('Detected modified modules: %r', names) + self._reload(names) + + def _reload(self, modules): + self.controller.reload(modules, reload=True) + + def stop(self): + if self._monitor: + self._monitor.stop() diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/autoscale.py b/thesisenv/lib/python3.6/site-packages/celery/worker/autoscale.py new file mode 100644 index 0000000..265feda --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/worker/autoscale.py @@ -0,0 +1,162 @@ +# -*- coding: utf-8 -*- +""" + celery.worker.autoscale + ~~~~~~~~~~~~~~~~~~~~~~~ + + This module implements the internal thread responsible + for growing and shrinking the pool according to the + current autoscale settings. + + The autoscale thread is only enabled if :option:`--autoscale` + has been enabled on the command-line. + +""" +from __future__ import absolute_import + +import os +import threading + +from time import sleep + +from kombu.async.semaphore import DummyLock + +from celery import bootsteps +from celery.five import monotonic +from celery.utils.log import get_logger +from celery.utils.threads import bgThread + +from . 
import state +from .components import Pool + +__all__ = ['Autoscaler', 'WorkerComponent'] + +logger = get_logger(__name__) +debug, info, error = logger.debug, logger.info, logger.error + +AUTOSCALE_KEEPALIVE = float(os.environ.get('AUTOSCALE_KEEPALIVE', 30)) + + +class WorkerComponent(bootsteps.StartStopStep): + label = 'Autoscaler' + conditional = True + requires = (Pool, ) + + def __init__(self, w, **kwargs): + self.enabled = w.autoscale + w.autoscaler = None + + def create(self, w): + scaler = w.autoscaler = self.instantiate( + w.autoscaler_cls, + w.pool, w.max_concurrency, w.min_concurrency, + worker=w, mutex=DummyLock() if w.use_eventloop else None, + ) + return scaler if not w.use_eventloop else None + + def register_with_event_loop(self, w, hub): + w.consumer.on_task_message.add(w.autoscaler.maybe_scale) + hub.call_repeatedly( + w.autoscaler.keepalive, w.autoscaler.maybe_scale, + ) + + +class Autoscaler(bgThread): + + def __init__(self, pool, max_concurrency, + min_concurrency=0, worker=None, + keepalive=AUTOSCALE_KEEPALIVE, mutex=None): + super(Autoscaler, self).__init__() + self.pool = pool + self.mutex = mutex or threading.Lock() + self.max_concurrency = max_concurrency + self.min_concurrency = min_concurrency + self.keepalive = keepalive + self._last_action = None + self.worker = worker + + assert self.keepalive, 'cannot scale down too fast.' + + def body(self): + with self.mutex: + self.maybe_scale() + sleep(1.0) + + def _maybe_scale(self, req=None): + procs = self.processes + cur = min(self.qty, self.max_concurrency) + if cur > procs: + self.scale_up(cur - procs) + return True + elif cur < procs: + self.scale_down((procs - cur) - self.min_concurrency) + return True + + def maybe_scale(self, req=None): + if self._maybe_scale(req): + self.pool.maintain_pool() + + def update(self, max=None, min=None): + with self.mutex: + if max is not None: + if max < self.max_concurrency: + self._shrink(self.processes - max) + self.max_concurrency = max + if min is not None: + if min > self.min_concurrency: + self._grow(min - self.min_concurrency) + self.min_concurrency = min + return self.max_concurrency, self.min_concurrency + + def force_scale_up(self, n): + with self.mutex: + new = self.processes + n + if new > self.max_concurrency: + self.max_concurrency = new + self.min_concurrency += 1 + self._grow(n) + + def force_scale_down(self, n): + with self.mutex: + new = self.processes - n + if new < self.min_concurrency: + self.min_concurrency = max(new, 0) + self._shrink(min(n, self.processes)) + + def scale_up(self, n): + self._last_action = monotonic() + return self._grow(n) + + def scale_down(self, n): + if n and self._last_action and ( + monotonic() - self._last_action > self.keepalive): + self._last_action = monotonic() + return self._shrink(n) + + def _grow(self, n): + info('Scaling up %s processes.', n) + self.pool.grow(n) + self.worker.consumer._update_prefetch_count(n) + + def _shrink(self, n): + info('Scaling down %s processes.', n) + try: + self.pool.shrink(n) + except ValueError: + debug("Autoscaler won't scale down: all processes busy.") + except Exception as exc: + error('Autoscaler: scale_down: %r', exc, exc_info=True) + self.worker.consumer._update_prefetch_count(-n) + + def info(self): + return {'max': self.max_concurrency, + 'min': self.min_concurrency, + 'current': self.processes, + 'qty': self.qty} + + @property + def qty(self): + return len(state.reserved_requests) + + @property + def processes(self): + return self.pool.num_processes diff --git 
a/thesisenv/lib/python3.6/site-packages/celery/worker/components.py b/thesisenv/lib/python3.6/site-packages/celery/worker/components.py new file mode 100644 index 0000000..bb02f4e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/worker/components.py @@ -0,0 +1,247 @@ +# -*- coding: utf-8 -*- +""" + celery.worker.components + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Default worker bootsteps. + +""" +from __future__ import absolute_import + +import atexit +import warnings + +from kombu.async import Hub as _Hub, get_event_loop, set_event_loop +from kombu.async.semaphore import DummyLock, LaxBoundedSemaphore +from kombu.async.timer import Timer as _Timer + +from celery import bootsteps +from celery._state import _set_task_join_will_block +from celery.exceptions import ImproperlyConfigured +from celery.five import string_t +from celery.utils.log import worker_logger as logger + +__all__ = ['Timer', 'Hub', 'Queues', 'Pool', 'Beat', 'StateDB', 'Consumer'] + +ERR_B_GREEN = """\ +-B option doesn't work with eventlet/gevent pools: \ +use standalone beat instead.\ +""" + +W_POOL_SETTING = """ +The CELERYD_POOL setting should not be used to select the eventlet/gevent +pools, instead you *must use the -P* argument so that patches are applied +as early as possible. +""" + + +class Timer(bootsteps.Step): + """This step initializes the internal timer used by the worker.""" + + def create(self, w): + if w.use_eventloop: + # does not use dedicated timer thread. + w.timer = _Timer(max_interval=10.0) + else: + if not w.timer_cls: + # Default Timer is set by the pool, as e.g. eventlet + # needs a custom implementation. + w.timer_cls = w.pool_cls.Timer + w.timer = self.instantiate(w.timer_cls, + max_interval=w.timer_precision, + on_timer_error=self.on_timer_error, + on_timer_tick=self.on_timer_tick) + + def on_timer_error(self, exc): + logger.error('Timer error: %r', exc, exc_info=True) + + def on_timer_tick(self, delay): + logger.debug('Timer wake-up! Next eta %s secs.', delay) + + +class Hub(bootsteps.StartStopStep): + requires = (Timer, ) + + def __init__(self, w, **kwargs): + w.hub = None + + def include_if(self, w): + return w.use_eventloop + + def create(self, w): + w.hub = get_event_loop() + if w.hub is None: + w.hub = set_event_loop(_Hub(w.timer)) + self._patch_thread_primitives(w) + return self + + def start(self, w): + pass + + def stop(self, w): + w.hub.close() + + def terminate(self, w): + w.hub.close() + + def _patch_thread_primitives(self, w): + # make clock use dummy lock + w.app.clock.mutex = DummyLock() + # multiprocessing's ApplyResult uses this lock. + try: + from billiard import pool + except ImportError: + pass + else: + pool.Lock = DummyLock + + +class Queues(bootsteps.Step): + """This bootstep initializes the internal queues + used by the worker.""" + label = 'Queues (intra)' + requires = (Hub, ) + + def create(self, w): + w.process_task = w._process_task + if w.use_eventloop: + if w.pool_putlocks and w.pool_cls.uses_semaphore: + w.process_task = w._process_task_sem + + +class Pool(bootsteps.StartStopStep): + """Bootstep managing the worker pool. + + Describes how to initialize the worker pool, and starts and stops + the pool during worker startup/shutdown. 
+ + Adds attributes: + + * autoscale + * pool + * max_concurrency + * min_concurrency + + """ + requires = (Queues, ) + + def __init__(self, w, autoscale=None, autoreload=None, + no_execv=False, optimization=None, **kwargs): + if isinstance(autoscale, string_t): + max_c, _, min_c = autoscale.partition(',') + autoscale = [int(max_c), min_c and int(min_c) or 0] + w.autoscale = autoscale + w.pool = None + w.max_concurrency = None + w.min_concurrency = w.concurrency + w.no_execv = no_execv + if w.autoscale: + w.max_concurrency, w.min_concurrency = w.autoscale + self.autoreload_enabled = autoreload + self.optimization = optimization + + def close(self, w): + if w.pool: + w.pool.close() + + def terminate(self, w): + if w.pool: + w.pool.terminate() + + def create(self, w, semaphore=None, max_restarts=None): + if w.app.conf.CELERYD_POOL in ('eventlet', 'gevent'): + warnings.warn(UserWarning(W_POOL_SETTING)) + threaded = not w.use_eventloop + procs = w.min_concurrency + forking_enable = w.no_execv if w.force_execv else True + if not threaded: + semaphore = w.semaphore = LaxBoundedSemaphore(procs) + w._quick_acquire = w.semaphore.acquire + w._quick_release = w.semaphore.release + max_restarts = 100 + allow_restart = self.autoreload_enabled or w.pool_restarts + pool = w.pool = self.instantiate( + w.pool_cls, w.min_concurrency, + initargs=(w.app, w.hostname), + maxtasksperchild=w.max_tasks_per_child, + timeout=w.task_time_limit, + soft_timeout=w.task_soft_time_limit, + putlocks=w.pool_putlocks and threaded, + lost_worker_timeout=w.worker_lost_wait, + threads=threaded, + max_restarts=max_restarts, + allow_restart=allow_restart, + forking_enable=forking_enable, + semaphore=semaphore, + sched_strategy=self.optimization, + ) + _set_task_join_will_block(pool.task_join_will_block) + return pool + + def info(self, w): + return {'pool': w.pool.info if w.pool else 'N/A'} + + def register_with_event_loop(self, w, hub): + w.pool.register_with_event_loop(hub) + + +class Beat(bootsteps.StartStopStep): + """Step used to embed a beat process. + + This will only be enabled if the ``beat`` + argument is set. 
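+
+    The embedded service is created with
+    :class:`celery.beat.EmbeddedService` and cannot be combined with the
+    eventlet/gevent pools.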
+ + """ + label = 'Beat' + conditional = True + + def __init__(self, w, beat=False, **kwargs): + self.enabled = w.beat = beat + w.beat = None + + def create(self, w): + from celery.beat import EmbeddedService + if w.pool_cls.__module__.endswith(('gevent', 'eventlet')): + raise ImproperlyConfigured(ERR_B_GREEN) + b = w.beat = EmbeddedService(w.app, + schedule_filename=w.schedule_filename, + scheduler_cls=w.scheduler_cls) + return b + + +class StateDB(bootsteps.Step): + """This bootstep sets up the workers state db if enabled.""" + + def __init__(self, w, **kwargs): + self.enabled = w.state_db + w._persistence = None + + def create(self, w): + w._persistence = w.state.Persistent(w.state, w.state_db, w.app.clock) + atexit.register(w._persistence.save) + + +class Consumer(bootsteps.StartStopStep): + last = True + + def create(self, w): + if w.max_concurrency: + prefetch_count = max(w.min_concurrency, 1) * w.prefetch_multiplier + else: + prefetch_count = w.concurrency * w.prefetch_multiplier + c = w.consumer = self.instantiate( + w.consumer_cls, w.process_task, + hostname=w.hostname, + send_events=w.send_events, + init_callback=w.ready_callback, + initial_prefetch_count=prefetch_count, + pool=w.pool, + timer=w.timer, + app=w.app, + controller=w, + hub=w.hub, + worker_options=w.options, + disable_rate_limits=w.disable_rate_limits, + prefetch_multiplier=w.prefetch_multiplier, + ) + return c diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/consumer.py b/thesisenv/lib/python3.6/site-packages/celery/worker/consumer.py new file mode 100644 index 0000000..cc93d6c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/worker/consumer.py @@ -0,0 +1,887 @@ +# -*- coding: utf-8 -*- +""" +celery.worker.consumer +~~~~~~~~~~~~~~~~~~~~~~ + +This module contains the components responsible for consuming messages +from the broker, processing the messages and keeping the broker connections +up and running. + +""" +from __future__ import absolute_import + +import errno +import kombu +import logging +import os +import socket + +from collections import defaultdict +from functools import partial +from heapq import heappush +from operator import itemgetter +from time import sleep + +from billiard.common import restart_state +from billiard.exceptions import RestartFreqExceeded +from kombu.async.semaphore import DummyLock +from kombu.common import QoS, ignore_errors +from kombu.syn import _detect_environment +from kombu.utils.compat import get_errno +from kombu.utils.encoding import safe_repr, bytes_t +from kombu.utils.limits import TokenBucket + +from celery import chain +from celery import bootsteps +from celery.app.trace import build_tracer +from celery.canvas import signature +from celery.exceptions import InvalidTaskError +from celery.five import items, values +from celery.utils.functional import noop +from celery.utils.log import get_logger +from celery.utils.objects import Bunch +from celery.utils.text import truncate +from celery.utils.timeutils import humanize_seconds, rate + +from . import heartbeat, loops, pidbox +from .state import task_reserved, maybe_shutdown, revoked, reserved_requests + +try: + buffer_t = buffer +except NameError: # pragma: no cover + # Py3 does not have buffer, but we only need isinstance. 
+ + class buffer_t(object): # noqa + pass + +__all__ = [ + 'Consumer', 'Connection', 'Events', 'Heart', 'Control', + 'Tasks', 'Evloop', 'Agent', 'Mingle', 'Gossip', 'dump_body', +] + +CLOSE = bootsteps.CLOSE +logger = get_logger(__name__) +debug, info, warn, error, crit = (logger.debug, logger.info, logger.warning, + logger.error, logger.critical) + +CONNECTION_RETRY = """\ +consumer: Connection to broker lost. \ +Trying to re-establish the connection...\ +""" + +CONNECTION_RETRY_STEP = """\ +Trying again {when}...\ +""" + +CONNECTION_ERROR = """\ +consumer: Cannot connect to %s: %s. +%s +""" + +CONNECTION_FAILOVER = """\ +Will retry using next failover.\ +""" + +UNKNOWN_FORMAT = """\ +Received and deleted unknown message. Wrong destination?!? + +The full contents of the message body was: %s +""" + +#: Error message for when an unregistered task is received. +UNKNOWN_TASK_ERROR = """\ +Received unregistered task of type %s. +The message has been ignored and discarded. + +Did you remember to import the module containing this task? +Or maybe you are using relative imports? +Please see http://bit.ly/gLye1c for more information. + +The full contents of the message body was: +%s +""" + +#: Error message for when an invalid task message is received. +INVALID_TASK_ERROR = """\ +Received invalid task message: %s +The message has been ignored and discarded. + +Please ensure your message conforms to the task +message protocol as described here: http://bit.ly/hYj41y + +The full contents of the message body was: +%s +""" + +MESSAGE_DECODE_ERROR = """\ +Can't decode message body: %r [type:%r encoding:%r headers:%s] + +body: %s +""" + +MESSAGE_REPORT = """\ +body: {0} +{{content_type:{1} content_encoding:{2} + delivery_info:{3} headers={4}}} +""" + +MINGLE_GET_FIELDS = itemgetter('clock', 'revoked') + + +def dump_body(m, body): + if isinstance(body, buffer_t): + body = bytes_t(body) + return '{0} ({1}b)'.format(truncate(safe_repr(body), 1024), + len(m.body)) + + +class Consumer(object): + Strategies = dict + + #: set when consumer is shutting down. + in_shutdown = False + + #: Optional callback called the first time the worker + #: is ready to receive tasks. + init_callback = None + + #: The current worker pool instance. + pool = None + + #: A timer used for high-priority internal tasks, such + #: as sending heartbeats. 
+ timer = None + + restart_count = -1 # first start is the same as a restart + + class Blueprint(bootsteps.Blueprint): + name = 'Consumer' + default_steps = [ + 'celery.worker.consumer:Connection', + 'celery.worker.consumer:Mingle', + 'celery.worker.consumer:Events', + 'celery.worker.consumer:Gossip', + 'celery.worker.consumer:Heart', + 'celery.worker.consumer:Control', + 'celery.worker.consumer:Tasks', + 'celery.worker.consumer:Evloop', + 'celery.worker.consumer:Agent', + ] + + def shutdown(self, parent): + self.send_all(parent, 'shutdown') + + def __init__(self, on_task_request, + init_callback=noop, hostname=None, + pool=None, app=None, + timer=None, controller=None, hub=None, amqheartbeat=None, + worker_options=None, disable_rate_limits=False, + initial_prefetch_count=2, prefetch_multiplier=1, **kwargs): + self.app = app + self.controller = controller + self.init_callback = init_callback + self.hostname = hostname or socket.gethostname() + self.pid = os.getpid() + self.pool = pool + self.timer = timer + self.strategies = self.Strategies() + conninfo = self.app.connection() + self.connection_errors = conninfo.connection_errors + self.channel_errors = conninfo.channel_errors + self._restart_state = restart_state(maxR=5, maxT=1) + + self._does_info = logger.isEnabledFor(logging.INFO) + self.on_task_request = on_task_request + self.on_task_message = set() + self.amqheartbeat_rate = self.app.conf.BROKER_HEARTBEAT_CHECKRATE + self.disable_rate_limits = disable_rate_limits + self.initial_prefetch_count = initial_prefetch_count + self.prefetch_multiplier = prefetch_multiplier + + # this contains a tokenbucket for each task type by name, used for + # rate limits, or None if rate limits are disabled for that task. + self.task_buckets = defaultdict(lambda: None) + self.reset_rate_limits() + + self.hub = hub + if self.hub: + self.amqheartbeat = amqheartbeat + if self.amqheartbeat is None: + self.amqheartbeat = self.app.conf.BROKER_HEARTBEAT + else: + self.amqheartbeat = 0 + + if not hasattr(self, 'loop'): + self.loop = loops.asynloop if hub else loops.synloop + + if _detect_environment() == 'gevent': + # there's a gevent bug that causes timeouts to not be reset, + # so if the connection timeout is exceeded once, it can NEVER + # connect again. + self.app.conf.BROKER_CONNECTION_TIMEOUT = None + + self.steps = [] + self.blueprint = self.Blueprint( + app=self.app, on_close=self.on_close, + ) + self.blueprint.apply(self, **dict(worker_options or {}, **kwargs)) + + def bucket_for_task(self, type): + limit = rate(getattr(type, 'rate_limit', None)) + return TokenBucket(limit, capacity=1) if limit else None + + def reset_rate_limits(self): + self.task_buckets.update( + (n, self.bucket_for_task(t)) for n, t in items(self.app.tasks) + ) + + def _update_prefetch_count(self, index=0): + """Update prefetch count after pool/shrink grow operations. + + Index must be the change in number of processes as a positive + (increasing) or negative (decreasing) number. + + .. note:: + + Currently pool grow operations will end up with an offset + of +1 if the initial size of the pool was 0 (e.g. + ``--autoscale=1,0``). 
+ + """ + num_processes = self.pool.num_processes + if not self.initial_prefetch_count or not num_processes: + return # prefetch disabled + self.initial_prefetch_count = ( + self.pool.num_processes * self.prefetch_multiplier + ) + return self._update_qos_eventually(index) + + def _update_qos_eventually(self, index): + return (self.qos.decrement_eventually if index < 0 + else self.qos.increment_eventually)( + abs(index) * self.prefetch_multiplier) + + def _limit_task(self, request, bucket, tokens): + if not bucket.can_consume(tokens): + hold = bucket.expected_time(tokens) + self.timer.call_after( + hold, self._limit_task, (request, bucket, tokens), + ) + else: + task_reserved(request) + self.on_task_request(request) + + def start(self): + blueprint = self.blueprint + while blueprint.state != CLOSE: + self.restart_count += 1 + maybe_shutdown() + try: + blueprint.start(self) + except self.connection_errors as exc: + if isinstance(exc, OSError) and get_errno(exc) == errno.EMFILE: + raise # Too many open files + maybe_shutdown() + try: + self._restart_state.step() + except RestartFreqExceeded as exc: + crit('Frequent restarts detected: %r', exc, exc_info=1) + sleep(1) + if blueprint.state != CLOSE and self.connection: + warn(CONNECTION_RETRY, exc_info=True) + try: + self.connection.collect() + except Exception: + pass + self.on_close() + blueprint.restart(self) + + def register_with_event_loop(self, hub): + self.blueprint.send_all( + self, 'register_with_event_loop', args=(hub, ), + description='Hub.register', + ) + + def shutdown(self): + self.in_shutdown = True + self.blueprint.shutdown(self) + + def stop(self): + self.blueprint.stop(self) + + def on_ready(self): + callback, self.init_callback = self.init_callback, None + if callback: + callback(self) + + def loop_args(self): + return (self, self.connection, self.task_consumer, + self.blueprint, self.hub, self.qos, self.amqheartbeat, + self.app.clock, self.amqheartbeat_rate) + + def on_decode_error(self, message, exc): + """Callback called if an error occurs while decoding + a message received. + + Simply logs the error and acknowledges the message so it + doesn't enter a loop. + + :param message: The message with errors. + :param exc: The original exception instance. + + """ + crit(MESSAGE_DECODE_ERROR, + exc, message.content_type, message.content_encoding, + safe_repr(message.headers), dump_body(message, message.body), + exc_info=1) + message.ack() + + def on_close(self): + # Clear internal queues to get rid of old messages. + # They can't be acked anyway, as a delivery tag is specific + # to the current channel. + if self.controller and self.controller.semaphore: + self.controller.semaphore.clear() + if self.timer: + self.timer.clear() + reserved_requests.clear() + if self.pool and self.pool.flush: + self.pool.flush() + + def connect(self): + """Establish the broker connection. + + Will retry establishing the connection if the + :setting:`BROKER_CONNECTION_RETRY` setting is enabled + + """ + conn = self.app.connection(heartbeat=self.amqheartbeat) + + # Callback called for each retry while the connection + # can't be established. + def _error_handler(exc, interval, next_step=CONNECTION_RETRY_STEP): + if getattr(conn, 'alt', None) and interval == 0: + next_step = CONNECTION_FAILOVER + error(CONNECTION_ERROR, conn.as_uri(), exc, + next_step.format(when=humanize_seconds(interval, 'in', ' '))) + + # remember that the connection is lazy, it won't establish + # until needed. 
+ if not self.app.conf.BROKER_CONNECTION_RETRY: + # retry disabled, just call connect directly. + conn.connect() + return conn + + conn = conn.ensure_connection( + _error_handler, self.app.conf.BROKER_CONNECTION_MAX_RETRIES, + callback=maybe_shutdown, + ) + if self.hub: + conn.transport.register_with_event_loop(conn.connection, self.hub) + return conn + + def add_task_queue(self, queue, exchange=None, exchange_type=None, + routing_key=None, **options): + cset = self.task_consumer + queues = self.app.amqp.queues + # Must use in' here, as __missing__ will automatically + # create queues when CELERY_CREATE_MISSING_QUEUES is enabled. + # (Issue #1079) + if queue in queues: + q = queues[queue] + else: + exchange = queue if exchange is None else exchange + exchange_type = ('direct' if exchange_type is None + else exchange_type) + q = queues.select_add(queue, + exchange=exchange, + exchange_type=exchange_type, + routing_key=routing_key, **options) + if not cset.consuming_from(queue): + cset.add_queue(q) + cset.consume() + info('Started consuming from %s', queue) + + def cancel_task_queue(self, queue): + info('Canceling queue %s', queue) + self.app.amqp.queues.deselect(queue) + self.task_consumer.cancel_by_queue(queue) + + def apply_eta_task(self, task): + """Method called by the timer to apply a task with an + ETA/countdown.""" + task_reserved(task) + self.on_task_request(task) + self.qos.decrement_eventually() + + def _message_report(self, body, message): + return MESSAGE_REPORT.format(dump_body(message, body), + safe_repr(message.content_type), + safe_repr(message.content_encoding), + safe_repr(message.delivery_info), + safe_repr(message.headers)) + + def on_unknown_message(self, body, message): + warn(UNKNOWN_FORMAT, self._message_report(body, message)) + message.reject_log_error(logger, self.connection_errors) + + def on_unknown_task(self, body, message, exc): + error(UNKNOWN_TASK_ERROR, exc, dump_body(message, body), exc_info=True) + message.reject_log_error(logger, self.connection_errors) + + def on_invalid_task(self, body, message, exc): + error(INVALID_TASK_ERROR, exc, dump_body(message, body), exc_info=True) + message.reject_log_error(logger, self.connection_errors) + + def update_strategies(self): + loader = self.app.loader + for name, task in items(self.app.tasks): + self.strategies[name] = task.start_strategy(self.app, self) + task.__trace__ = build_tracer(name, task, loader, self.hostname, + app=self.app) + + def create_task_handler(self): + strategies = self.strategies + on_unknown_message = self.on_unknown_message + on_unknown_task = self.on_unknown_task + on_invalid_task = self.on_invalid_task + callbacks = self.on_task_message + + def on_task_received(body, message): + headers = message.headers + try: + type_, is_proto2 = body['task'], 0 + except (KeyError, TypeError): + try: + type_, is_proto2 = headers['task'], 1 + except (KeyError, TypeError): + return on_unknown_message(body, message) + + if is_proto2: + body = proto2_to_proto1( + self.app, type_, body, message, headers) + + try: + strategies[type_](message, body, + message.ack_log_error, + message.reject_log_error, + callbacks) + except KeyError as exc: + on_unknown_task(body, message, exc) + except InvalidTaskError as exc: + on_invalid_task(body, message, exc) + + return on_task_received + + def __repr__(self): + return ''.format( + self=self, state=self.blueprint.human_state(), + ) + + +def proto2_to_proto1(app, type_, body, message, headers): + args, kwargs, embed = body + embedded = _extract_proto2_embed(**embed) + 
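+    # Protocol 2 message bodies are (args, kwargs, embed) tuples; rebuild
+    # the flat protocol 1 dict and re-attach any pending chain as a callback.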
chained = embedded.pop('chain') + new_body = dict( + _extract_proto2_headers(type_, **headers), + args=args, + kwargs=kwargs, + **embedded) + if chained: + new_body['callbacks'].append(chain(chained, app=app)) + return new_body + + +def _extract_proto2_headers(type_, id, retries, eta, expires, + group, timelimit, **_): + return { + 'id': id, + 'task': type_, + 'retries': retries, + 'eta': eta, + 'expires': expires, + 'utc': True, + 'taskset': group, + 'timelimit': timelimit, + } + + +def _extract_proto2_embed(callbacks, errbacks, chain, chord, **_): + return { + 'callbacks': callbacks or [], + 'errbacks': errbacks, + 'chain': chain, + 'chord': chord, + } + + +class Connection(bootsteps.StartStopStep): + + def __init__(self, c, **kwargs): + c.connection = None + + def start(self, c): + c.connection = c.connect() + info('Connected to %s', c.connection.as_uri()) + + def shutdown(self, c): + # We must set self.connection to None here, so + # that the green pidbox thread exits. + connection, c.connection = c.connection, None + if connection: + ignore_errors(connection, connection.close) + + def info(self, c, params='N/A'): + if c.connection: + params = c.connection.info() + params.pop('password', None) # don't send password. + return {'broker': params} + + +class Events(bootsteps.StartStopStep): + requires = (Connection, ) + + def __init__(self, c, send_events=None, **kwargs): + self.send_events = True + self.groups = None if send_events else ['worker'] + c.event_dispatcher = None + + def start(self, c): + # flush events sent while connection was down. + prev = self._close(c) + dis = c.event_dispatcher = c.app.events.Dispatcher( + c.connect(), hostname=c.hostname, + enabled=self.send_events, groups=self.groups, + ) + if prev: + dis.extend_buffer(prev) + dis.flush() + + def stop(self, c): + pass + + def _close(self, c): + if c.event_dispatcher: + dispatcher = c.event_dispatcher + # remember changes from remote control commands: + self.groups = dispatcher.groups + + # close custom connection + if dispatcher.connection: + ignore_errors(c, dispatcher.connection.close) + ignore_errors(c, dispatcher.close) + c.event_dispatcher = None + return dispatcher + + def shutdown(self, c): + self._close(c) + + +class Heart(bootsteps.StartStopStep): + requires = (Events, ) + + def __init__(self, c, without_heartbeat=False, heartbeat_interval=None, + **kwargs): + self.enabled = not without_heartbeat + self.heartbeat_interval = heartbeat_interval + c.heart = None + + def start(self, c): + c.heart = heartbeat.Heart( + c.timer, c.event_dispatcher, self.heartbeat_interval, + ) + c.heart.start() + + def stop(self, c): + c.heart = c.heart and c.heart.stop() + shutdown = stop + + +class Mingle(bootsteps.StartStopStep): + label = 'Mingle' + requires = (Events, ) + compatible_transports = set(['amqp', 'redis']) + + def __init__(self, c, without_mingle=False, **kwargs): + self.enabled = not without_mingle and self.compatible_transport(c.app) + + def compatible_transport(self, app): + with app.connection() as conn: + return conn.transport.driver_type in self.compatible_transports + + def start(self, c): + info('mingle: searching for neighbors') + I = c.app.control.inspect(timeout=1.0, connection=c.connection) + replies = I.hello(c.hostname, revoked._data) or {} + replies.pop(c.hostname, None) + if replies: + info('mingle: sync with %s nodes', + len([reply for reply, value in items(replies) if value])) + for reply in values(replies): + if reply: + try: + other_clock, other_revoked = MINGLE_GET_FIELDS(reply) + except 
KeyError: # reply from pre-3.1 worker + pass + else: + c.app.clock.adjust(other_clock) + revoked.update(other_revoked) + info('mingle: sync complete') + else: + info('mingle: all alone') + + +class Tasks(bootsteps.StartStopStep): + requires = (Mingle, ) + + def __init__(self, c, **kwargs): + c.task_consumer = c.qos = None + + def start(self, c): + c.update_strategies() + + # - RabbitMQ 3.3 completely redefines how basic_qos works.. + # This will detect if the new qos smenatics is in effect, + # and if so make sure the 'apply_global' flag is set on qos updates. + qos_global = not c.connection.qos_semantics_matches_spec + + # set initial prefetch count + c.connection.default_channel.basic_qos( + 0, c.initial_prefetch_count, qos_global, + ) + + c.task_consumer = c.app.amqp.TaskConsumer( + c.connection, on_decode_error=c.on_decode_error, + ) + + def set_prefetch_count(prefetch_count): + return c.task_consumer.qos( + prefetch_count=prefetch_count, + apply_global=qos_global, + ) + c.qos = QoS(set_prefetch_count, c.initial_prefetch_count) + + def stop(self, c): + if c.task_consumer: + debug('Canceling task consumer...') + ignore_errors(c, c.task_consumer.cancel) + + def shutdown(self, c): + if c.task_consumer: + self.stop(c) + debug('Closing consumer channel...') + ignore_errors(c, c.task_consumer.close) + c.task_consumer = None + + def info(self, c): + return {'prefetch_count': c.qos.value if c.qos else 'N/A'} + + +class Agent(bootsteps.StartStopStep): + conditional = True + requires = (Connection, ) + + def __init__(self, c, **kwargs): + self.agent_cls = self.enabled = c.app.conf.CELERYD_AGENT + + def create(self, c): + agent = c.agent = self.instantiate(self.agent_cls, c.connection) + return agent + + +class Control(bootsteps.StartStopStep): + requires = (Tasks, ) + + def __init__(self, c, **kwargs): + self.is_green = c.pool is not None and c.pool.is_green + self.box = (pidbox.gPidbox if self.is_green else pidbox.Pidbox)(c) + self.start = self.box.start + self.stop = self.box.stop + self.shutdown = self.box.shutdown + + def include_if(self, c): + return c.app.conf.CELERY_ENABLE_REMOTE_CONTROL + + +class Gossip(bootsteps.ConsumerStep): + label = 'Gossip' + requires = (Mingle, ) + _cons_stamp_fields = itemgetter( + 'id', 'clock', 'hostname', 'pid', 'topic', 'action', 'cver', + ) + compatible_transports = set(['amqp', 'redis']) + + def __init__(self, c, without_gossip=False, interval=5.0, **kwargs): + self.enabled = not without_gossip and self.compatible_transport(c.app) + self.app = c.app + c.gossip = self + self.Receiver = c.app.events.Receiver + self.hostname = c.hostname + self.full_hostname = '.'.join([self.hostname, str(c.pid)]) + self.on = Bunch( + node_join=set(), + node_leave=set(), + node_lost=set(), + ) + + self.timer = c.timer + if self.enabled: + self.state = c.app.events.State( + on_node_join=self.on_node_join, + on_node_leave=self.on_node_leave, + max_tasks_in_memory=1, + ) + if c.hub: + c._mutex = DummyLock() + self.update_state = self.state.event + self.interval = interval + self._tref = None + self.consensus_requests = defaultdict(list) + self.consensus_replies = {} + self.event_handlers = { + 'worker.elect': self.on_elect, + 'worker.elect.ack': self.on_elect_ack, + } + self.clock = c.app.clock + + self.election_handlers = { + 'task': self.call_task + } + + def compatible_transport(self, app): + with app.connection() as conn: + return conn.transport.driver_type in self.compatible_transports + + def election(self, id, topic, action=None): + self.consensus_replies[id] = [] + 
self.dispatcher.send( + 'worker-elect', + id=id, topic=topic, action=action, cver=1, + ) + + def call_task(self, task): + try: + signature(task, app=self.app).apply_async() + except Exception as exc: + error('Could not call task: %r', exc, exc_info=1) + + def on_elect(self, event): + try: + (id_, clock, hostname, pid, + topic, action, _) = self._cons_stamp_fields(event) + except KeyError as exc: + return error('election request missing field %s', exc, exc_info=1) + heappush( + self.consensus_requests[id_], + (clock, '%s.%s' % (hostname, pid), topic, action), + ) + self.dispatcher.send('worker-elect-ack', id=id_) + + def start(self, c): + super(Gossip, self).start(c) + self.dispatcher = c.event_dispatcher + + def on_elect_ack(self, event): + id = event['id'] + try: + replies = self.consensus_replies[id] + except KeyError: + return # not for us + alive_workers = self.state.alive_workers() + replies.append(event['hostname']) + + if len(replies) >= len(alive_workers): + _, leader, topic, action = self.clock.sort_heap( + self.consensus_requests[id], + ) + if leader == self.full_hostname: + info('I won the election %r', id) + try: + handler = self.election_handlers[topic] + except KeyError: + error('Unknown election topic %r', topic, exc_info=1) + else: + handler(action) + else: + info('node %s elected for %r', leader, id) + self.consensus_requests.pop(id, None) + self.consensus_replies.pop(id, None) + + def on_node_join(self, worker): + debug('%s joined the party', worker.hostname) + self._call_handlers(self.on.node_join, worker) + + def on_node_leave(self, worker): + debug('%s left', worker.hostname) + self._call_handlers(self.on.node_leave, worker) + + def on_node_lost(self, worker): + info('missed heartbeat from %s', worker.hostname) + self._call_handlers(self.on.node_lost, worker) + + def _call_handlers(self, handlers, *args, **kwargs): + for handler in handlers: + try: + handler(*args, **kwargs) + except Exception as exc: + error('Ignored error from handler %r: %r', + handler, exc, exc_info=1) + + def register_timer(self): + if self._tref is not None: + self._tref.cancel() + self._tref = self.timer.call_repeatedly(self.interval, self.periodic) + + def periodic(self): + workers = self.state.workers + dirty = set() + for worker in values(workers): + if not worker.alive: + dirty.add(worker) + self.on_node_lost(worker) + for worker in dirty: + workers.pop(worker.hostname, None) + + def get_consumers(self, channel): + self.register_timer() + ev = self.Receiver(channel, routing_key='worker.#') + return [kombu.Consumer( + channel, + queues=[ev.queue], + on_message=partial(self.on_message, ev.event_from_message), + no_ack=True + )] + + def on_message(self, prepare, message): + _type = message.delivery_info['routing_key'] + + # For redis when `fanout_patterns=False` (See Issue #1882) + if _type.split('.', 1)[0] == 'task': + return + try: + handler = self.event_handlers[_type] + except KeyError: + pass + else: + return handler(message.payload) + + hostname = (message.headers.get('hostname') or + message.payload['hostname']) + if hostname != self.hostname: + type, event = prepare(message.payload) + self.update_state(event) + else: + self.clock.forward() + + +class Evloop(bootsteps.StartStopStep): + label = 'event loop' + last = True + + def start(self, c): + self.patch_all(c) + c.loop(*c.loop_args()) + + def patch_all(self, c): + c.qos._mutex = DummyLock() diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/control.py b/thesisenv/lib/python3.6/site-packages/celery/worker/control.py new 
file mode 100644 index 0000000..e8b033d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/worker/control.py @@ -0,0 +1,385 @@ +# -*- coding: utf-8 -*- +""" + celery.worker.control + ~~~~~~~~~~~~~~~~~~~~~ + + Remote control commands. + +""" +from __future__ import absolute_import + +import io +import tempfile + +from kombu.utils.encoding import safe_repr + +from celery.exceptions import WorkerShutdown +from celery.five import UserDict, items, string_t +from celery.platforms import signals as _signals +from celery.utils import timeutils +from celery.utils.functional import maybe_list +from celery.utils.log import get_logger +from celery.utils import jsonify + +from . import state as worker_state +from .state import revoked +from .job import Request + +__all__ = ['Panel'] +DEFAULT_TASK_INFO_ITEMS = ('exchange', 'routing_key', 'rate_limit') +logger = get_logger(__name__) + + +class Panel(UserDict): + data = dict() # Global registry. + + @classmethod + def register(cls, method, name=None): + cls.data[name or method.__name__] = method + return method + + +def _find_requests_by_id(ids, requests): + found, total = 0, len(ids) + for request in requests: + if request.id in ids: + yield request + found += 1 + if found >= total: + break + + +@Panel.register +def query_task(state, ids, **kwargs): + ids = maybe_list(ids) + + def reqinfo(state, req): + return state, req.info() + + reqs = dict((req.id, ('reserved', req.info())) + for req in _find_requests_by_id( + ids, worker_state.reserved_requests)) + reqs.update(dict( + (req.id, ('active', req.info())) + for req in _find_requests_by_id( + ids, worker_state.active_requests, + ) + )) + + return reqs + + +@Panel.register +def revoke(state, task_id, terminate=False, signal=None, **kwargs): + """Revoke task by task id.""" + # supports list argument since 3.1 + task_ids, task_id = set(maybe_list(task_id) or []), None + size = len(task_ids) + terminated = set() + + revoked.update(task_ids) + if terminate: + signum = _signals.signum(signal or 'TERM') + # reserved_requests changes size during iteration + # so need to consume the items first, then terminate after. 
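    # [editorial note: usage sketch, not part of the vendored celery source]
    # This Panel command executes inside the worker; clients normally reach it
    # through the broadcast control API. A minimal, hypothetical example
    # (broker URL and task id are placeholders):
    #
    #     from celery import Celery
    #
    #     app = Celery('mysite', broker='amqp://localhost//')
    #     app.control.revoke('d9078da5-9915-40a0-bfa1-392c7bde42ed',
    #                        terminate=True, signal='SIGTERM')
    #
    # terminate=True exercises the branch below: matching reserved/active
    # requests are looked up and the chosen signal is sent to the worker
    # process running the task.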
+ requests = set(_find_requests_by_id( + task_ids, + worker_state.reserved_requests, + )) + for request in requests: + if request.id not in terminated: + terminated.add(request.id) + logger.info('Terminating %s (%s)', request.id, signum) + request.terminate(state.consumer.pool, signal=signum) + if len(terminated) >= size: + break + + if not terminated: + return {'ok': 'terminate: tasks unknown'} + return {'ok': 'terminate: {0}'.format(', '.join(terminated))} + + idstr = ', '.join(task_ids) + logger.info('Tasks flagged as revoked: %s', idstr) + return {'ok': 'tasks {0} flagged as revoked'.format(idstr)} + + +@Panel.register +def report(state): + return {'ok': state.app.bugreport()} + + +@Panel.register +def enable_events(state): + dispatcher = state.consumer.event_dispatcher + if dispatcher.groups and 'task' not in dispatcher.groups: + dispatcher.groups.add('task') + logger.info('Events of group {task} enabled by remote.') + return {'ok': 'task events enabled'} + return {'ok': 'task events already enabled'} + + +@Panel.register +def disable_events(state): + dispatcher = state.consumer.event_dispatcher + if 'task' in dispatcher.groups: + dispatcher.groups.discard('task') + logger.info('Events of group {task} disabled by remote.') + return {'ok': 'task events disabled'} + return {'ok': 'task events already disabled'} + + +@Panel.register +def heartbeat(state): + logger.debug('Heartbeat requested by remote.') + dispatcher = state.consumer.event_dispatcher + dispatcher.send('worker-heartbeat', freq=5, **worker_state.SOFTWARE_INFO) + + +@Panel.register +def rate_limit(state, task_name, rate_limit, **kwargs): + """Set new rate limit for a task type. + + See :attr:`celery.task.base.Task.rate_limit`. + + :param task_name: Type of task. + :param rate_limit: New rate limit. 
+ + """ + + try: + timeutils.rate(rate_limit) + except ValueError as exc: + return {'error': 'Invalid rate limit string: {0!r}'.format(exc)} + + try: + state.app.tasks[task_name].rate_limit = rate_limit + except KeyError: + logger.error('Rate limit attempt for unknown task %s', + task_name, exc_info=True) + return {'error': 'unknown task'} + + state.consumer.reset_rate_limits() + + if not rate_limit: + logger.info('Rate limits disabled for tasks of type %s', task_name) + return {'ok': 'rate limit disabled successfully'} + + logger.info('New rate limit for tasks of type %s: %s.', + task_name, rate_limit) + return {'ok': 'new rate limit set successfully'} + + +@Panel.register +def time_limit(state, task_name=None, hard=None, soft=None, **kwargs): + try: + task = state.app.tasks[task_name] + except KeyError: + logger.error('Change time limit attempt for unknown task %s', + task_name, exc_info=True) + return {'error': 'unknown task'} + + task.soft_time_limit = soft + task.time_limit = hard + + logger.info('New time limits for tasks of type %s: soft=%s hard=%s', + task_name, soft, hard) + return {'ok': 'time limits set successfully'} + + +@Panel.register +def dump_schedule(state, safe=False, **kwargs): + + def prepare_entries(): + for waiting in state.consumer.timer.schedule.queue: + try: + arg0 = waiting.entry.args[0] + except (IndexError, TypeError): + continue + else: + if isinstance(arg0, Request): + yield {'eta': arg0.eta.isoformat() if arg0.eta else None, + 'priority': waiting.priority, + 'request': arg0.info(safe=safe)} + return list(prepare_entries()) + + +@Panel.register +def dump_reserved(state, safe=False, **kwargs): + reserved = worker_state.reserved_requests - worker_state.active_requests + if not reserved: + return [] + return [request.info(safe=safe) for request in reserved] + + +@Panel.register +def dump_active(state, safe=False, **kwargs): + return [request.info(safe=safe) + for request in worker_state.active_requests] + + +@Panel.register +def stats(state, **kwargs): + return state.consumer.controller.stats() + + +@Panel.register +def objgraph(state, num=200, max_depth=10, type='Request'): # pragma: no cover + try: + import objgraph + except ImportError: + raise ImportError('Requires the objgraph library') + print('Dumping graph for type %r' % (type, )) + with tempfile.NamedTemporaryFile(prefix='cobjg', + suffix='.png', delete=False) as fh: + objects = objgraph.by_type(type)[:num] + objgraph.show_backrefs( + objects, + max_depth=max_depth, highlight=lambda v: v in objects, + filename=fh.name, + ) + return {'filename': fh.name} + + +@Panel.register +def memsample(state, **kwargs): # pragma: no cover + from celery.utils.debug import sample_mem + return sample_mem() + + +@Panel.register +def memdump(state, samples=10, **kwargs): # pragma: no cover + from celery.utils.debug import memdump + out = io.StringIO() + memdump(file=out) + return out.getvalue() + + +@Panel.register +def clock(state, **kwargs): + return {'clock': state.app.clock.value} + + +@Panel.register +def dump_revoked(state, **kwargs): + return list(worker_state.revoked) + + +@Panel.register +def hello(state, from_node, revoked=None, **kwargs): + if from_node != state.hostname: + logger.info('sync with %s', from_node) + if revoked: + worker_state.revoked.update(revoked) + return {'revoked': worker_state.revoked._data, + 'clock': state.app.clock.forward()} + + +@Panel.register +def dump_tasks(state, taskinfoitems=None, builtins=False, **kwargs): + reg = state.app.tasks + taskinfoitems = taskinfoitems or 
DEFAULT_TASK_INFO_ITEMS + + tasks = reg if builtins else ( + task for task in reg if not task.startswith('celery.')) + + def _extract_info(task): + fields = dict((field, str(getattr(task, field, None))) + for field in taskinfoitems + if getattr(task, field, None) is not None) + if fields: + info = ['='.join(f) for f in items(fields)] + return '{0} [{1}]'.format(task.name, ' '.join(info)) + return task.name + + return [_extract_info(reg[task]) for task in sorted(tasks)] + + +@Panel.register +def ping(state, **kwargs): + return {'ok': 'pong'} + + +@Panel.register +def pool_grow(state, n=1, **kwargs): + if state.consumer.controller.autoscaler: + state.consumer.controller.autoscaler.force_scale_up(n) + else: + state.consumer.pool.grow(n) + state.consumer._update_prefetch_count(n) + return {'ok': 'pool will grow'} + + +@Panel.register +def pool_shrink(state, n=1, **kwargs): + if state.consumer.controller.autoscaler: + state.consumer.controller.autoscaler.force_scale_down(n) + else: + state.consumer.pool.shrink(n) + state.consumer._update_prefetch_count(-n) + return {'ok': 'pool will shrink'} + + +@Panel.register +def pool_restart(state, modules=None, reload=False, reloader=None, **kwargs): + if state.app.conf.CELERYD_POOL_RESTARTS: + state.consumer.controller.reload(modules, reload, reloader=reloader) + return {'ok': 'reload started'} + else: + raise ValueError('Pool restarts not enabled') + + +@Panel.register +def autoscale(state, max=None, min=None): + autoscaler = state.consumer.controller.autoscaler + if autoscaler: + max_, min_ = autoscaler.update(max, min) + return {'ok': 'autoscale now max={0} min={1}'.format(max_, min_)} + raise ValueError('Autoscale not enabled') + + +@Panel.register +def shutdown(state, msg='Got shutdown from remote', **kwargs): + logger.warning(msg) + raise WorkerShutdown(msg) + + +@Panel.register +def add_consumer(state, queue, exchange=None, exchange_type=None, + routing_key=None, **options): + state.consumer.add_task_queue(queue, exchange, exchange_type, + routing_key, **options) + return {'ok': 'add consumer {0}'.format(queue)} + + +@Panel.register +def cancel_consumer(state, queue=None, **_): + state.consumer.cancel_task_queue(queue) + return {'ok': 'no longer consuming from {0}'.format(queue)} + + +@Panel.register +def active_queues(state): + """Return information about the queues a worker consumes from.""" + if state.consumer.task_consumer: + return [dict(queue.as_dict(recurse=True)) + for queue in state.consumer.task_consumer.queues] + return [] + + +def _wanted_config_key(key): + return (isinstance(key, string_t) and + key.isupper() and + not key.startswith('__')) + + +@Panel.register +def dump_conf(state, with_defaults=False, **kwargs): + return jsonify(state.app.conf.table(with_defaults=with_defaults), + keyfilter=_wanted_config_key, + unknown_type_filter=safe_repr) + + +@Panel.register +def election(state, id, topic, action=None, **kwargs): + if state.consumer.gossip: + state.consumer.gossip.election(id, topic, action) diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/heartbeat.py b/thesisenv/lib/python3.6/site-packages/celery/worker/heartbeat.py new file mode 100644 index 0000000..cf46ab0 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/worker/heartbeat.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +""" + celery.worker.heartbeat + ~~~~~~~~~~~~~~~~~~~~~~~ + + This is the internal thread that sends heartbeat events + at regular intervals. 
+ +""" +from __future__ import absolute_import + +from celery.utils.sysinfo import load_average + +from .state import SOFTWARE_INFO, active_requests, all_total_count + +__all__ = ['Heart'] + + +class Heart(object): + """Timer sending heartbeats at regular intervals. + + :param timer: Timer instance. + :param eventer: Event dispatcher used to send the event. + :keyword interval: Time in seconds between heartbeats. + Default is 2 seconds. + + """ + + def __init__(self, timer, eventer, interval=None): + self.timer = timer + self.eventer = eventer + self.interval = float(interval or 2.0) + self.tref = None + + # Make event dispatcher start/stop us when enabled/disabled. + self.eventer.on_enabled.add(self.start) + self.eventer.on_disabled.add(self.stop) + + def _send(self, event): + return self.eventer.send(event, freq=self.interval, + active=len(active_requests), + processed=all_total_count[0], + loadavg=load_average(), + **SOFTWARE_INFO) + + def start(self): + if self.eventer.enabled: + self._send('worker-online') + self.tref = self.timer.call_repeatedly( + self.interval, self._send, ('worker-heartbeat', ), + ) + + def stop(self): + if self.tref is not None: + self.timer.cancel(self.tref) + self.tref = None + if self.eventer.enabled: + self._send('worker-offline') diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/job.py b/thesisenv/lib/python3.6/site-packages/celery/worker/job.py new file mode 100644 index 0000000..793de3d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/worker/job.py @@ -0,0 +1,595 @@ +# -*- coding: utf-8 -*- +""" + celery.worker.job + ~~~~~~~~~~~~~~~~~ + + This module defines the :class:`Request` class, + which specifies how tasks are executed. + +""" +from __future__ import absolute_import, unicode_literals + +import logging +import socket +import sys + +from billiard.einfo import ExceptionInfo +from datetime import datetime +from weakref import ref + +from kombu.utils import kwdict, reprcall +from kombu.utils.encoding import safe_repr, safe_str + +from celery import signals +from celery.app.trace import trace_task, trace_task_ret +from celery.exceptions import ( + Ignore, TaskRevokedError, InvalidTaskError, + SoftTimeLimitExceeded, TimeLimitExceeded, + WorkerLostError, Terminated, Retry, Reject, +) +from celery.five import items, monotonic, string, string_t +from celery.platforms import signals as _signals +from celery.utils import fun_takes_kwargs +from celery.utils.functional import noop +from celery.utils.log import get_logger +from celery.utils.serialization import get_pickled_exception +from celery.utils.text import truncate +from celery.utils.timeutils import maybe_iso8601, timezone, maybe_make_aware + +from . 
import state + +__all__ = ['Request'] + +IS_PYPY = hasattr(sys, 'pypy_version_info') + +logger = get_logger(__name__) +debug, info, warn, error = (logger.debug, logger.info, + logger.warning, logger.error) +_does_info = False +_does_debug = False + +#: Max length of result representation +RESULT_MAXLEN = 128 + + +def __optimize__(): + # this is also called by celery.app.trace.setup_worker_optimizations + global _does_debug + global _does_info + _does_debug = logger.isEnabledFor(logging.DEBUG) + _does_info = logger.isEnabledFor(logging.INFO) +__optimize__() + +# Localize +tz_utc = timezone.utc +tz_or_local = timezone.tz_or_local +send_revoked = signals.task_revoked.send + +task_accepted = state.task_accepted +task_ready = state.task_ready +revoked_tasks = state.revoked + +NEEDS_KWDICT = sys.version_info <= (2, 6) + +#: Use when no message object passed to :class:`Request`. +DEFAULT_FIELDS = { + 'headers': None, + 'reply_to': None, + 'correlation_id': None, + 'delivery_info': { + 'exchange': None, + 'routing_key': None, + 'priority': 0, + 'redelivered': False, + }, +} + + +class Request(object): + """A request for task execution.""" + if not IS_PYPY: # pragma: no cover + __slots__ = ( + 'app', 'name', 'id', 'args', 'kwargs', 'on_ack', + 'hostname', 'eventer', 'connection_errors', 'task', 'eta', + 'expires', 'request_dict', 'acknowledged', 'on_reject', + 'utc', 'time_start', 'worker_pid', '_already_revoked', + '_terminate_on_ack', '_apply_result', + '_tzlocal', '__weakref__', '__dict__', + ) + + #: Format string used to log task success. + success_msg = """\ + Task %(name)s[%(id)s] succeeded in %(runtime)ss: %(return_value)s + """ + + #: Format string used to log task failure. + error_msg = """\ + Task %(name)s[%(id)s] %(description)s: %(exc)s + """ + + #: Format string used to log internal error. + internal_error_msg = """\ + Task %(name)s[%(id)s] %(description)s: %(exc)s + """ + + ignored_msg = """\ + Task %(name)s[%(id)s] %(description)s + """ + + rejected_msg = """\ + Task %(name)s[%(id)s] %(exc)s + """ + + #: Format string used to log task retry. + retry_msg = """Task %(name)s[%(id)s] retry: %(exc)s""" + + def __init__(self, body, on_ack=noop, + hostname=None, eventer=None, app=None, + connection_errors=None, request_dict=None, + message=None, task=None, on_reject=noop, **opts): + self.app = app + name = self.name = body['task'] + self.id = body['id'] + self.args = body.get('args', []) + self.kwargs = body.get('kwargs', {}) + try: + self.kwargs.items + except AttributeError: + raise InvalidTaskError( + 'Task keyword arguments is not a mapping') + if NEEDS_KWDICT: + self.kwargs = kwdict(self.kwargs) + eta = body.get('eta') + expires = body.get('expires') + utc = self.utc = body.get('utc', False) + self.on_ack = on_ack + self.on_reject = on_reject + self.hostname = hostname or socket.gethostname() + self.eventer = eventer + self.connection_errors = connection_errors or () + self.task = task or self.app.tasks[name] + self.acknowledged = self._already_revoked = False + self.time_start = self.worker_pid = self._terminate_on_ack = None + self._apply_result = None + self._tzlocal = None + + # timezone means the message is timezone-aware, and the only timezone + # supported at this point is UTC. 
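        # [editorial note: not part of the vendored celery source] The eta and
        # expires values handled below originate from apply_async() on the
        # client side, e.g. a hypothetical call such as:
        #
        #     from datetime import datetime, timedelta
        #     add.apply_async(args=(2, 2),
        #                     eta=datetime.utcnow() + timedelta(minutes=5),
        #                     expires=datetime.utcnow() + timedelta(hours=1))
        #
        # Both are transmitted as ISO-8601 strings, which is why maybe_iso8601()
        # is used here to parse them back into datetimes (and maybe_make_aware()
        # to attach the UTC timezone).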
+ if eta is not None: + try: + self.eta = maybe_iso8601(eta) + except (AttributeError, ValueError, TypeError) as exc: + raise InvalidTaskError( + 'invalid eta value {0!r}: {1}'.format(eta, exc)) + if utc: + self.eta = maybe_make_aware(self.eta, self.tzlocal) + else: + self.eta = None + if expires is not None: + try: + self.expires = maybe_iso8601(expires) + except (AttributeError, ValueError, TypeError) as exc: + raise InvalidTaskError( + 'invalid expires value {0!r}: {1}'.format(expires, exc)) + if utc: + self.expires = maybe_make_aware(self.expires, self.tzlocal) + else: + self.expires = None + + if message: + delivery_info = message.delivery_info or {} + properties = message.properties or {} + body.update({ + 'headers': message.headers, + 'reply_to': properties.get('reply_to'), + 'correlation_id': properties.get('correlation_id'), + 'delivery_info': { + 'exchange': delivery_info.get('exchange'), + 'routing_key': delivery_info.get('routing_key'), + 'priority': properties.get( + 'priority', delivery_info.get('priority')), + 'redelivered': delivery_info.get('redelivered'), + } + + }) + else: + body.update(DEFAULT_FIELDS) + self.request_dict = body + + @property + def delivery_info(self): + return self.request_dict['delivery_info'] + + def extend_with_default_kwargs(self): + """Extend the tasks keyword arguments with standard task arguments. + + Currently these are `logfile`, `loglevel`, `task_id`, + `task_name`, `task_retries`, and `delivery_info`. + + See :meth:`celery.task.base.Task.run` for more information. + + Magic keyword arguments are deprecated and will be removed + in version 4.0. + + """ + kwargs = dict(self.kwargs) + default_kwargs = {'logfile': None, # deprecated + 'loglevel': None, # deprecated + 'task_id': self.id, + 'task_name': self.name, + 'task_retries': self.request_dict.get('retries', 0), + 'task_is_eager': False, + 'delivery_info': self.delivery_info} + fun = self.task.run + supported_keys = fun_takes_kwargs(fun, default_kwargs) + extend_with = dict((key, val) for key, val in items(default_kwargs) + if key in supported_keys) + kwargs.update(extend_with) + return kwargs + + def execute_using_pool(self, pool, **kwargs): + """Used by the worker to send this task to the pool. + + :param pool: A :class:`celery.concurrency.base.TaskPool` instance. + + :raises celery.exceptions.TaskRevokedError: if the task was revoked + and ignored. + + """ + uuid = self.id + task = self.task + if self.revoked(): + raise TaskRevokedError(uuid) + + hostname = self.hostname + kwargs = self.kwargs + if task.accept_magic_kwargs: + kwargs = self.extend_with_default_kwargs() + request = self.request_dict + request.update({'hostname': hostname, 'is_eager': False, + 'delivery_info': self.delivery_info, + 'group': self.request_dict.get('taskset')}) + timeout, soft_timeout = request.get('timelimit', (None, None)) + timeout = timeout or task.time_limit + soft_timeout = soft_timeout or task.soft_time_limit + result = pool.apply_async( + trace_task_ret, + args=(self.name, uuid, self.args, kwargs, request), + accept_callback=self.on_accepted, + timeout_callback=self.on_timeout, + callback=self.on_success, + error_callback=self.on_failure, + soft_timeout=soft_timeout, + timeout=timeout, + correlation_id=uuid, + ) + # cannot create weakref to None + self._apply_result = ref(result) if result is not None else result + return result + + def execute(self, loglevel=None, logfile=None): + """Execute the task in a :func:`~celery.app.trace.trace_task`. + + :keyword loglevel: The loglevel used by the task. 
+ :keyword logfile: The logfile used by the task. + + """ + if self.revoked(): + return + + # acknowledge task as being processed. + if not self.task.acks_late: + self.acknowledge() + + kwargs = self.kwargs + if self.task.accept_magic_kwargs: + kwargs = self.extend_with_default_kwargs() + request = self.request_dict + request.update({'loglevel': loglevel, 'logfile': logfile, + 'hostname': self.hostname, 'is_eager': False, + 'delivery_info': self.delivery_info}) + retval = trace_task(self.task, self.id, self.args, kwargs, request, + hostname=self.hostname, loader=self.app.loader, + app=self.app) + self.acknowledge() + return retval + + def maybe_expire(self): + """If expired, mark the task as revoked.""" + if self.expires: + now = datetime.now(self.expires.tzinfo) + if now > self.expires: + revoked_tasks.add(self.id) + return True + + def terminate(self, pool, signal=None): + signal = _signals.signum(signal or 'TERM') + if self.time_start: + pool.terminate_job(self.worker_pid, signal) + self._announce_revoked('terminated', True, signal, False) + else: + self._terminate_on_ack = pool, signal + if self._apply_result is not None: + obj = self._apply_result() # is a weakref + if obj is not None: + obj.terminate(signal) + + def _announce_revoked(self, reason, terminated, signum, expired): + task_ready(self) + self.send_event('task-revoked', + terminated=terminated, signum=signum, expired=expired) + if self.store_errors: + self.task.backend.mark_as_revoked(self.id, reason, request=self) + self.acknowledge() + self._already_revoked = True + send_revoked(self.task, request=self, + terminated=terminated, signum=signum, expired=expired) + + def revoked(self): + """If revoked, skip task and mark state.""" + expired = False + if self._already_revoked: + return True + if self.expires: + expired = self.maybe_expire() + if self.id in revoked_tasks: + info('Discarding revoked task: %s[%s]', self.name, self.id) + self._announce_revoked( + 'expired' if expired else 'revoked', False, None, expired, + ) + return True + return False + + def send_event(self, type, **fields): + if self.eventer and self.eventer.enabled and self.task.send_events: + self.eventer.send(type, uuid=self.id, **fields) + + def on_accepted(self, pid, time_accepted): + """Handler called when task is accepted by worker pool.""" + self.worker_pid = pid + self.time_start = time_accepted + task_accepted(self) + if not self.task.acks_late: + self.acknowledge() + self.send_event('task-started') + if _does_debug: + debug('Task accepted: %s[%s] pid:%r', self.name, self.id, pid) + if self._terminate_on_ack is not None: + self.terminate(*self._terminate_on_ack) + + def on_timeout(self, soft, timeout): + """Handler called if the task times out.""" + task_ready(self) + if soft: + warn('Soft time limit (%ss) exceeded for %s[%s]', + timeout, self.name, self.id) + exc = SoftTimeLimitExceeded(timeout) + else: + error('Hard time limit (%ss) exceeded for %s[%s]', + timeout, self.name, self.id) + exc = TimeLimitExceeded(timeout) + + if self.store_errors: + self.task.backend.mark_as_failure(self.id, exc, request=self) + + if self.task.acks_late: + self.acknowledge() + + def on_success(self, ret_value, now=None, nowfun=monotonic): + """Handler called if the task was successfully processed.""" + if isinstance(ret_value, ExceptionInfo): + if isinstance(ret_value.exception, ( + SystemExit, KeyboardInterrupt)): + raise ret_value.exception + return self.on_failure(ret_value) + task_ready(self) + + if self.task.acks_late: + self.acknowledge() + + if self.eventer and 
self.eventer.enabled: + now = nowfun() + runtime = self.time_start and (now - self.time_start) or 0 + self.send_event('task-succeeded', + result=safe_repr(ret_value), runtime=runtime) + + if _does_info: + now = now or nowfun() + runtime = self.time_start and (now - self.time_start) or 0 + info(self.success_msg.strip(), { + 'id': self.id, 'name': self.name, + 'return_value': self.repr_result(ret_value), + 'runtime': runtime}) + + def on_retry(self, exc_info): + """Handler called if the task should be retried.""" + if self.task.acks_late: + self.acknowledge() + + self.send_event('task-retried', + exception=safe_repr(exc_info.exception.exc), + traceback=safe_str(exc_info.traceback)) + + if _does_info: + info(self.retry_msg.strip(), + {'id': self.id, 'name': self.name, + 'exc': exc_info.exception}) + + def on_failure(self, exc_info): + """Handler called if the task raised an exception.""" + task_ready(self) + send_failed_event = True + + if not exc_info.internal: + exc = exc_info.exception + + if isinstance(exc, Retry): + return self.on_retry(exc_info) + + # These are special cases where the process would not have had + # time to write the result. + if self.store_errors: + if isinstance(exc, WorkerLostError): + self.task.backend.mark_as_failure( + self.id, exc, request=self, + ) + elif isinstance(exc, Terminated): + self._announce_revoked( + 'terminated', True, string(exc), False) + send_failed_event = False # already sent revoked event + # (acks_late) acknowledge after result stored. + if self.task.acks_late: + self.acknowledge() + self._log_error(exc_info, send_failed_event=send_failed_event) + + def _log_error(self, einfo, send_failed_event=True): + einfo.exception = get_pickled_exception(einfo.exception) + eobj = einfo.exception + exception, traceback, exc_info, internal, sargs, skwargs = ( + safe_repr(eobj), + safe_str(einfo.traceback), + einfo.exc_info, + einfo.internal, + safe_repr(self.args), + safe_repr(self.kwargs), + ) + task = self.task + if task.throws and isinstance(eobj, task.throws): + do_send_mail, severity, exc_info, description = ( + False, logging.INFO, None, 'raised expected', + ) + else: + do_send_mail, severity, description = ( + True, logging.ERROR, 'raised unexpected', + ) + + format = self.error_msg + if internal: + if isinstance(einfo.exception, MemoryError): + raise MemoryError('Process got: %s' % (einfo.exception, )) + elif isinstance(einfo.exception, Reject): + format = self.rejected_msg + description = 'rejected' + severity = logging.WARN + send_failed_event = False + self.reject(requeue=einfo.exception.requeue) + elif isinstance(einfo.exception, Ignore): + format = self.ignored_msg + description = 'ignored' + severity = logging.INFO + exc_info = None + send_failed_event = False + self.acknowledge() + else: + format = self.internal_error_msg + description = 'INTERNAL ERROR' + severity = logging.CRITICAL + + if send_failed_event: + self.send_event( + 'task-failed', exception=exception, traceback=traceback, + ) + + context = { + 'hostname': self.hostname, + 'id': self.id, + 'name': self.name, + 'exc': exception, + 'traceback': traceback, + 'args': sargs, + 'kwargs': skwargs, + 'description': description, + } + + logger.log(severity, format.strip(), context, + exc_info=exc_info, + extra={'data': {'id': self.id, + 'name': self.name, + 'args': sargs, + 'kwargs': skwargs, + 'hostname': self.hostname, + 'internal': internal}}) + + if do_send_mail: + task.send_error_email(context, einfo.exception) + + def acknowledge(self): + """Acknowledge task.""" + if not 
self.acknowledged: + self.on_ack(logger, self.connection_errors) + self.acknowledged = True + + def reject(self, requeue=False): + if not self.acknowledged: + self.on_reject(logger, self.connection_errors, requeue) + self.acknowledged = True + + def repr_result(self, result, maxlen=RESULT_MAXLEN): + # 46 is the length needed to fit + # 'the quick brown fox jumps over the lazy dog' :) + if not isinstance(result, string_t): + result = safe_repr(result) + return truncate(result) if len(result) > maxlen else result + + def info(self, safe=False): + return {'id': self.id, + 'name': self.name, + 'args': self.args if safe else safe_repr(self.args), + 'kwargs': self.kwargs if safe else safe_repr(self.kwargs), + 'hostname': self.hostname, + 'time_start': self.time_start, + 'acknowledged': self.acknowledged, + 'delivery_info': self.delivery_info, + 'worker_pid': self.worker_pid} + + def __str__(self): + return '{0.name}[{0.id}]{1}{2}'.format( + self, + ' eta:[{0}]'.format(self.eta) if self.eta else '', + ' expires:[{0}]'.format(self.expires) if self.expires else '', + ) + shortinfo = __str__ + + def __repr__(self): + return '<{0} {1}: {2}>'.format( + type(self).__name__, self.id, + reprcall(self.name, self.args, self.kwargs)) + + @property + def tzlocal(self): + if self._tzlocal is None: + self._tzlocal = self.app.conf.CELERY_TIMEZONE + return self._tzlocal + + @property + def store_errors(self): + return (not self.task.ignore_result or + self.task.store_errors_even_if_ignored) + + @property + def task_id(self): + # XXX compat + return self.id + + @task_id.setter # noqa + def task_id(self, value): + self.id = value + + @property + def task_name(self): + # XXX compat + return self.name + + @task_name.setter # noqa + def task_name(self, value): + self.name = value + + @property + def reply_to(self): + # used by rpc backend when failures reported by parent process + return self.request_dict['reply_to'] + + @property + def correlation_id(self): + # used similarly to reply_to + return self.request_dict['correlation_id'] diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/loops.py b/thesisenv/lib/python3.6/site-packages/celery/worker/loops.py new file mode 100644 index 0000000..8b006a8 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/worker/loops.py @@ -0,0 +1,108 @@ +""" +celery.worker.loop +~~~~~~~~~~~~~~~~~~ + +The consumers highly-optimized inner loop. + +""" +from __future__ import absolute_import + +import socket + +from celery.bootsteps import RUN +from celery.exceptions import WorkerShutdown, WorkerTerminate, WorkerLostError +from celery.utils.log import get_logger + +from . 
import state + +__all__ = ['asynloop', 'synloop'] + +logger = get_logger(__name__) +error = logger.error + + +def asynloop(obj, connection, consumer, blueprint, hub, qos, + heartbeat, clock, hbrate=2.0, RUN=RUN): + """Non-blocking event loop consuming messages until connection is lost, + or shutdown is requested.""" + update_qos = qos.update + hbtick = connection.heartbeat_check + errors = connection.connection_errors + heartbeat = connection.get_heartbeat_interval() # negotiated + + on_task_received = obj.create_task_handler() + + if heartbeat and connection.supports_heartbeats: + hub.call_repeatedly(heartbeat / hbrate, hbtick, hbrate) + + consumer.callbacks = [on_task_received] + consumer.consume() + obj.on_ready() + obj.controller.register_with_event_loop(hub) + obj.register_with_event_loop(hub) + + # did_start_ok will verify that pool processes were able to start, + # but this will only work the first time we start, as + # maxtasksperchild will mess up metrics. + if not obj.restart_count and not obj.pool.did_start_ok(): + raise WorkerLostError('Could not start worker processes') + + # consumer.consume() may have prefetched up to our + # limit - drain an event so we are in a clean state + # prior to starting our event loop. + if connection.transport.driver_type == 'amqp': + hub.call_soon(connection.drain_events) + + # FIXME: Use loop.run_forever + # Tried and works, but no time to test properly before release. + hub.propagate_errors = errors + loop = hub.create_loop() + + try: + while blueprint.state == RUN and obj.connection: + # shutdown if signal handlers told us to. + if state.should_stop: + raise WorkerShutdown() + elif state.should_terminate: + raise WorkerTerminate() + + # We only update QoS when there is no more messages to read. + # This groups together qos calls, and makes sure that remote + # control commands will be prioritized over task messages. + if qos.prev != qos.value: + update_qos() + + try: + next(loop) + except StopIteration: + loop = hub.create_loop() + finally: + try: + hub.reset() + except Exception as exc: + error( + 'Error cleaning up after event loop: %r', exc, exc_info=1, + ) + + +def synloop(obj, connection, consumer, blueprint, hub, qos, + heartbeat, clock, hbrate=2.0, **kwargs): + """Fallback blocking event loop for transports that doesn't support AIO.""" + + on_task_received = obj.create_task_handler() + consumer.register_callback(on_task_received) + consumer.consume() + + obj.on_ready() + + while blueprint.state == RUN and obj.connection: + state.maybe_shutdown() + if qos.prev != qos.value: + qos.update() + try: + connection.drain_events(timeout=2.0) + except socket.timeout: + pass + except socket.error: + if blueprint.state == RUN: + raise diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/pidbox.py b/thesisenv/lib/python3.6/site-packages/celery/worker/pidbox.py new file mode 100644 index 0000000..058edd4 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/worker/pidbox.py @@ -0,0 +1,116 @@ +from __future__ import absolute_import + +import socket +import threading + +from kombu.common import ignore_errors +from kombu.utils.encoding import safe_str + +from celery.datastructures import AttributeDict +from celery.utils.log import get_logger + +from . 
import control + +__all__ = ['Pidbox', 'gPidbox'] + +logger = get_logger(__name__) +debug, error, info = logger.debug, logger.error, logger.info + + +class Pidbox(object): + consumer = None + + def __init__(self, c): + self.c = c + self.hostname = c.hostname + self.node = c.app.control.mailbox.Node( + safe_str(c.hostname), + handlers=control.Panel.data, + state=AttributeDict(app=c.app, hostname=c.hostname, consumer=c), + ) + self._forward_clock = self.c.app.clock.forward + + def on_message(self, body, message): + # just increase clock as clients usually don't + # have a valid clock to adjust with. + self._forward_clock() + try: + self.node.handle_message(body, message) + except KeyError as exc: + error('No such control command: %s', exc) + except Exception as exc: + error('Control command error: %r', exc, exc_info=True) + self.reset() + + def start(self, c): + self.node.channel = c.connection.channel() + self.consumer = self.node.listen(callback=self.on_message) + self.consumer.on_decode_error = c.on_decode_error + + def on_stop(self): + pass + + def stop(self, c): + self.on_stop() + self.consumer = self._close_channel(c) + + def reset(self): + """Sets up the process mailbox.""" + self.stop(self.c) + self.start(self.c) + + def _close_channel(self, c): + if self.node and self.node.channel: + ignore_errors(c, self.node.channel.close) + + def shutdown(self, c): + self.on_stop() + if self.consumer: + debug('Canceling broadcast consumer...') + ignore_errors(c, self.consumer.cancel) + self.stop(self.c) + + +class gPidbox(Pidbox): + _node_shutdown = None + _node_stopped = None + _resets = 0 + + def start(self, c): + c.pool.spawn_n(self.loop, c) + + def on_stop(self): + if self._node_stopped: + self._node_shutdown.set() + debug('Waiting for broadcast thread to shutdown...') + self._node_stopped.wait() + self._node_stopped = self._node_shutdown = None + + def reset(self): + self._resets += 1 + + def _do_reset(self, c, connection): + self._close_channel(c) + self.node.channel = connection.channel() + self.consumer = self.node.listen(callback=self.on_message) + self.consumer.consume() + + def loop(self, c): + resets = [self._resets] + shutdown = self._node_shutdown = threading.Event() + stopped = self._node_stopped = threading.Event() + try: + with c.connect() as connection: + + info('pidbox: Connected to %s.', connection.as_uri()) + self._do_reset(c, connection) + while not shutdown.is_set() and c.connection: + if resets[0] < self._resets: + resets[0] += 1 + self._do_reset(c, connection) + try: + connection.drain_events(timeout=1.0) + except socket.timeout: + pass + finally: + stopped.set() diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/request.py b/thesisenv/lib/python3.6/site-packages/celery/worker/request.py new file mode 100644 index 0000000..8a65701 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/worker/request.py @@ -0,0 +1,536 @@ +# -*- coding: utf-8 -*- +"""This module defines the :class:`Request` class, that specifies +how tasks are executed.""" +from __future__ import absolute_import, unicode_literals + +import logging +import sys + +from datetime import datetime +from weakref import ref + +from billiard.common import TERM_SIGNAME +from kombu.utils.encoding import safe_repr, safe_str +from kombu.utils.objects import cached_property + +from celery import signals +from celery.app.trace import trace_task, trace_task_ret +from celery.exceptions import ( + Ignore, TaskRevokedError, InvalidTaskError, + SoftTimeLimitExceeded, TimeLimitExceeded, + 
WorkerLostError, Terminated, Retry, Reject, +) +from celery.five import python_2_unicode_compatible, string +from celery.platforms import signals as _signals +from celery.utils.functional import maybe, noop +from celery.utils.log import get_logger +from celery.utils.nodenames import gethostname +from celery.utils.time import maybe_iso8601, timezone, maybe_make_aware +from celery.utils.serialization import get_pickled_exception + +from . import state + +__all__ = ['Request'] + +IS_PYPY = hasattr(sys, 'pypy_version_info') + +logger = get_logger(__name__) +debug, info, warn, error = (logger.debug, logger.info, + logger.warning, logger.error) +_does_info = False +_does_debug = False + + +def __optimize__(): + # this is also called by celery.app.trace.setup_worker_optimizations + global _does_debug + global _does_info + _does_debug = logger.isEnabledFor(logging.DEBUG) + _does_info = logger.isEnabledFor(logging.INFO) +__optimize__() + +# Localize +tz_or_local = timezone.tz_or_local +send_revoked = signals.task_revoked.send + +task_accepted = state.task_accepted +task_ready = state.task_ready +revoked_tasks = state.revoked + + +@python_2_unicode_compatible +class Request(object): + """A request for task execution.""" + acknowledged = False + time_start = None + worker_pid = None + time_limits = (None, None) + _already_revoked = False + _terminate_on_ack = None + _apply_result = None + _tzlocal = None + + if not IS_PYPY: # pragma: no cover + __slots__ = ( + 'app', 'type', 'name', 'id', 'root_id', 'parent_id', + 'on_ack', 'body', 'hostname', 'eventer', 'connection_errors', + 'task', 'eta', 'expires', 'request_dict', 'on_reject', 'utc', + 'content_type', 'content_encoding', 'argsrepr', 'kwargsrepr', + '_decoded', + '__weakref__', '__dict__', + ) + + def __init__(self, message, on_ack=noop, + hostname=None, eventer=None, app=None, + connection_errors=None, request_dict=None, + task=None, on_reject=noop, body=None, + headers=None, decoded=False, utc=True, + maybe_make_aware=maybe_make_aware, + maybe_iso8601=maybe_iso8601, **opts): + if headers is None: + headers = message.headers + if body is None: + body = message.body + self.app = app + self.message = message + self.body = body + self.utc = utc + self._decoded = decoded + if decoded: + self.content_type = self.content_encoding = None + else: + self.content_type, self.content_encoding = ( + message.content_type, message.content_encoding, + ) + + self.id = headers['id'] + type = self.type = self.name = headers['task'] + self.root_id = headers.get('root_id') + self.parent_id = headers.get('parent_id') + if 'shadow' in headers: + self.name = headers['shadow'] or self.name + if 'timelimit' in headers: + self.time_limits = headers['timelimit'] + self.argsrepr = headers.get('argsrepr', '') + self.kwargsrepr = headers.get('kwargsrepr', '') + self.on_ack = on_ack + self.on_reject = on_reject + self.hostname = hostname or gethostname() + self.eventer = eventer + self.connection_errors = connection_errors or () + self.task = task or self.app.tasks[type] + + # timezone means the message is timezone-aware, and the only timezone + # supported at this point is UTC. 
+ eta = headers.get('eta') + if eta is not None: + try: + eta = maybe_iso8601(eta) + except (AttributeError, ValueError, TypeError) as exc: + raise InvalidTaskError( + 'invalid ETA value {0!r}: {1}'.format(eta, exc)) + self.eta = maybe_make_aware(eta, self.tzlocal) + else: + self.eta = None + + expires = headers.get('expires') + if expires is not None: + try: + expires = maybe_iso8601(expires) + except (AttributeError, ValueError, TypeError) as exc: + raise InvalidTaskError( + 'invalid expires value {0!r}: {1}'.format(expires, exc)) + self.expires = maybe_make_aware(expires, self.tzlocal) + else: + self.expires = None + + delivery_info = message.delivery_info or {} + properties = message.properties or {} + headers.update({ + 'reply_to': properties.get('reply_to'), + 'correlation_id': properties.get('correlation_id'), + 'delivery_info': { + 'exchange': delivery_info.get('exchange'), + 'routing_key': delivery_info.get('routing_key'), + 'priority': properties.get('priority'), + 'redelivered': delivery_info.get('redelivered'), + } + + }) + self.request_dict = headers + + @property + def delivery_info(self): + return self.request_dict['delivery_info'] + + def execute_using_pool(self, pool, **kwargs): + """Used by the worker to send this task to the pool. + + Arguments: + pool (~celery.concurrency.base.TaskPool): The execution pool + used to execute this request. + + Raises: + celery.exceptions.TaskRevokedError: if the task was revoked. + """ + task_id = self.id + task = self.task + if self.revoked(): + raise TaskRevokedError(task_id) + + time_limit, soft_time_limit = self.time_limits + result = pool.apply_async( + trace_task_ret, + args=(self.type, task_id, self.request_dict, self.body, + self.content_type, self.content_encoding), + accept_callback=self.on_accepted, + timeout_callback=self.on_timeout, + callback=self.on_success, + error_callback=self.on_failure, + soft_timeout=soft_time_limit or task.soft_time_limit, + timeout=time_limit or task.time_limit, + correlation_id=task_id, + ) + # cannot create weakref to None + self._apply_result = maybe(ref, result) + return result + + def execute(self, loglevel=None, logfile=None): + """Execute the task in a :func:`~celery.app.trace.trace_task`. + + Arguments: + loglevel (int): The loglevel used by the task. + logfile (str): The logfile used by the task. + """ + if self.revoked(): + return + + # acknowledge task as being processed. 
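        # [editorial note: not part of the vendored celery source] Whether the
        # message is acknowledged here, before execution, or only after the
        # result is stored depends on the task's acks_late flag, e.g. the
        # hypothetical:
        #
        #     @app.task(acks_late=True)
        #     def process_upload(path):
        #         ...
        #
        # (or the CELERY_ACKS_LATE setting). Late acknowledgement means the
        # message is redelivered if the worker dies mid-task instead of being
        # silently lost.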
+ if not self.task.acks_late: + self.acknowledge() + + request = self.request_dict + args, kwargs, embed = self._payload + request.update({'loglevel': loglevel, 'logfile': logfile, + 'hostname': self.hostname, 'is_eager': False, + 'args': args, 'kwargs': kwargs}, **embed or {}) + retval = trace_task(self.task, self.id, args, kwargs, request, + hostname=self.hostname, loader=self.app.loader, + app=self.app)[0] + self.acknowledge() + return retval + + def maybe_expire(self): + """If expired, mark the task as revoked.""" + if self.expires: + now = datetime.now(self.expires.tzinfo) + if now > self.expires: + revoked_tasks.add(self.id) + return True + + def terminate(self, pool, signal=None): + signal = _signals.signum(signal or TERM_SIGNAME) + if self.time_start: + pool.terminate_job(self.worker_pid, signal) + self._announce_revoked('terminated', True, signal, False) + else: + self._terminate_on_ack = pool, signal + if self._apply_result is not None: + obj = self._apply_result() # is a weakref + if obj is not None: + obj.terminate(signal) + + def _announce_revoked(self, reason, terminated, signum, expired): + task_ready(self) + self.send_event('task-revoked', + terminated=terminated, signum=signum, expired=expired) + self.task.backend.mark_as_revoked( + self.id, reason, request=self, store_result=self.store_errors, + ) + self.acknowledge() + self._already_revoked = True + send_revoked(self.task, request=self, + terminated=terminated, signum=signum, expired=expired) + + def revoked(self): + """If revoked, skip task and mark state.""" + expired = False + if self._already_revoked: + return True + if self.expires: + expired = self.maybe_expire() + if self.id in revoked_tasks: + info('Discarding revoked task: %s[%s]', self.name, self.id) + self._announce_revoked( + 'expired' if expired else 'revoked', False, None, expired, + ) + return True + return False + + def send_event(self, type, **fields): + if self.eventer and self.eventer.enabled and self.task.send_events: + self.eventer.send(type, uuid=self.id, **fields) + + def on_accepted(self, pid, time_accepted): + """Handler called when task is accepted by worker pool.""" + self.worker_pid = pid + self.time_start = time_accepted + task_accepted(self) + if not self.task.acks_late: + self.acknowledge() + self.send_event('task-started') + if _does_debug: + debug('Task accepted: %s[%s] pid:%r', self.name, self.id, pid) + if self._terminate_on_ack is not None: + self.terminate(*self._terminate_on_ack) + + def on_timeout(self, soft, timeout): + """Handler called if the task times out.""" + task_ready(self) + if soft: + warn('Soft time limit (%ss) exceeded for %s[%s]', + soft, self.name, self.id) + exc = SoftTimeLimitExceeded(soft) + else: + error('Hard time limit (%ss) exceeded for %s[%s]', + timeout, self.name, self.id) + exc = TimeLimitExceeded(timeout) + + self.task.backend.mark_as_failure( + self.id, exc, request=self, store_result=self.store_errors, + ) + + if self.task.acks_late: + self.acknowledge() + + def on_success(self, failed__retval__runtime, **kwargs): + """Handler called if the task was successfully processed.""" + failed, retval, runtime = failed__retval__runtime + if failed: + if isinstance(retval.exception, (SystemExit, KeyboardInterrupt)): + raise retval.exception + return self.on_failure(retval, return_ok=True) + task_ready(self) + + if self.task.acks_late: + self.acknowledge() + + self.send_event('task-succeeded', result=retval, runtime=runtime) + + def on_retry(self, exc_info): + """Handler called if the task should be retried.""" + 
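        # [editorial note: not part of the vendored celery source] This handler
        # fires when the task body raised Retry, typically via a hypothetical:
        #
        #     @app.task(bind=True, max_retries=3)
        #     def fetch(self, url):
        #         try:
        #             return download(url)          # download() is assumed
        #         except IOError as exc:
        #             raise self.retry(exc=exc, countdown=60)
        #
        # The original exception is available as exc_info.exception.exc below.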
if self.task.acks_late: + self.acknowledge() + + self.send_event('task-retried', + exception=safe_repr(exc_info.exception.exc), + traceback=safe_str(exc_info.traceback)) + + def on_failure(self, exc_info, send_failed_event=True, return_ok=False): + """Handler called if the task raised an exception.""" + task_ready(self) + if isinstance(exc_info.exception, MemoryError): + raise MemoryError('Process got: %s' % (exc_info.exception,)) + elif isinstance(exc_info.exception, Reject): + return self.reject(requeue=exc_info.exception.requeue) + elif isinstance(exc_info.exception, Ignore): + return self.acknowledge() + + exc = exc_info.exception + + if isinstance(exc, Retry): + return self.on_retry(exc_info) + + # These are special cases where the process wouldn't've had + # time to write the result. + if isinstance(exc, Terminated): + self._announce_revoked( + 'terminated', True, string(exc), False) + send_failed_event = False # already sent revoked event + elif isinstance(exc, WorkerLostError) or not return_ok: + self.task.backend.mark_as_failure( + self.id, exc, request=self, store_result=self.store_errors, + ) + # (acks_late) acknowledge after result stored. + if self.task.acks_late: + requeue = self.delivery_info.get('redelivered', None) is False + reject = ( + self.task.reject_on_worker_lost and + isinstance(exc, WorkerLostError) + ) + if reject: + self.reject(requeue=requeue) + send_failed_event = False + else: + self.acknowledge() + + if send_failed_event: + self.send_event( + 'task-failed', + exception=safe_repr(get_pickled_exception(exc_info.exception)), + traceback=exc_info.traceback, + ) + + if not return_ok: + error('Task handler raised error: %r', exc, + exc_info=exc_info.exc_info) + + def acknowledge(self): + """Acknowledge task.""" + if not self.acknowledged: + self.on_ack(logger, self.connection_errors) + self.acknowledged = True + + def reject(self, requeue=False): + if not self.acknowledged: + self.on_reject(logger, self.connection_errors, requeue) + self.acknowledged = True + self.send_event('task-rejected', requeue=requeue) + + def info(self, safe=False): + return { + 'id': self.id, + 'name': self.name, + 'args': self.argsrepr, + 'kwargs': self.kwargsrepr, + 'type': self.type, + 'body': self.body, + 'hostname': self.hostname, + 'time_start': self.time_start, + 'acknowledged': self.acknowledged, + 'delivery_info': self.delivery_info, + 'worker_pid': self.worker_pid, + } + + def __str__(self): + return ' '.join([ + self.humaninfo(), + ' ETA:[{0}]'.format(self.eta) if self.eta else '', + ' expires:[{0}]'.format(self.expires) if self.expires else '', + ]) + + def humaninfo(self): + return '{0.name}[{0.id}]'.format(self) + + def __repr__(self): + return '<{0}: {1} {2} {3}>'.format( + type(self).__name__, self.humaninfo(), + self.argsrepr, self.kwargsrepr, + ) + + @property + def tzlocal(self): + if self._tzlocal is None: + self._tzlocal = self.app.conf.timezone + return self._tzlocal + + @property + def store_errors(self): + return (not self.task.ignore_result or + self.task.store_errors_even_if_ignored) + + @property + def task_id(self): + # XXX compat + return self.id + + @task_id.setter # noqa + def task_id(self, value): + self.id = value + + @property + def task_name(self): + # XXX compat + return self.name + + @task_name.setter # noqa + def task_name(self, value): + self.name = value + + @property + def reply_to(self): + # used by rpc backend when failures reported by parent process + return self.request_dict['reply_to'] + + @property + def correlation_id(self): + # used 
similarly to reply_to + return self.request_dict['correlation_id'] + + @cached_property + def _payload(self): + return self.body if self._decoded else self.message.payload + + @cached_property + def chord(self): + # used by backend.mark_as_failure when failure is reported + # by parent process + _, _, embed = self._payload + return embed.get('chord') + + @cached_property + def errbacks(self): + # used by backend.mark_as_failure when failure is reported + # by parent process + _, _, embed = self._payload + return embed.get('errbacks') + + @cached_property + def group(self): + # used by backend.on_chord_part_return when failures reported + # by parent process + return self.request_dict['group'] + + +def create_request_cls(base, task, pool, hostname, eventer, + ref=ref, revoked_tasks=revoked_tasks, + task_ready=task_ready): + from celery.app.trace import trace_task_ret as trace + default_time_limit = task.time_limit + default_soft_time_limit = task.soft_time_limit + apply_async = pool.apply_async + acks_late = task.acks_late + events = eventer and eventer.enabled + + class Request(base): + + def execute_using_pool(self, pool, **kwargs): + task_id = self.id + if (self.expires or task_id in revoked_tasks) and self.revoked(): + raise TaskRevokedError(task_id) + + time_limit, soft_time_limit = self.time_limits + result = apply_async( + trace, + args=(self.type, task_id, self.request_dict, self.body, + self.content_type, self.content_encoding), + accept_callback=self.on_accepted, + timeout_callback=self.on_timeout, + callback=self.on_success, + error_callback=self.on_failure, + soft_timeout=soft_time_limit or default_soft_time_limit, + timeout=time_limit or default_time_limit, + correlation_id=task_id, + ) + # cannot create weakref to None + self._apply_result = maybe(ref, result) + return result + + def on_success(self, failed__retval__runtime, **kwargs): + failed, retval, runtime = failed__retval__runtime + if failed: + if isinstance(retval.exception, ( + SystemExit, KeyboardInterrupt)): + raise retval.exception + return self.on_failure(retval, return_ok=True) + task_ready(self) + + if acks_late: + self.acknowledge() + + if events: + self.send_event( + 'task-succeeded', result=retval, runtime=runtime, + ) + + return Request diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/state.py b/thesisenv/lib/python3.6/site-packages/celery/worker/state.py new file mode 100644 index 0000000..1aa4cbc --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/worker/state.py @@ -0,0 +1,246 @@ +# -*- coding: utf-8 -*- +""" + celery.worker.state + ~~~~~~~~~~~~~~~~~~~ + + Internal worker state (global) + + This includes the currently active and reserved tasks, + statistics, and revoked tasks. + +""" +from __future__ import absolute_import + +import os +import sys +import platform +import shelve +import zlib + +from kombu.serialization import pickle, pickle_protocol +from kombu.utils import cached_property + +from celery import __version__ +from celery.datastructures import LimitedSet +from celery.exceptions import WorkerShutdown, WorkerTerminate +from celery.five import Counter + +__all__ = ['SOFTWARE_INFO', 'reserved_requests', 'active_requests', + 'total_count', 'revoked', 'task_reserved', 'maybe_shutdown', + 'task_accepted', 'task_ready', 'task_reserved', 'task_ready', + 'Persistent'] + +#: Worker software/platform information. +SOFTWARE_INFO = {'sw_ident': 'py-celery', + 'sw_ver': __version__, + 'sw_sys': platform.system()} + +#: maximum number of revokes to keep in memory. 
+REVOKES_MAX = 50000 + +#: how many seconds a revoke will be active before +#: being expired when the max limit has been exceeded. +REVOKE_EXPIRES = 10800 + +#: set of all reserved :class:`~celery.worker.job.Request`'s. +reserved_requests = set() + +#: set of currently active :class:`~celery.worker.job.Request`'s. +active_requests = set() + +#: count of tasks accepted by the worker, sorted by type. +total_count = Counter() + +#: count of all tasks accepted by the worker +all_total_count = [0] + +#: the list of currently revoked tasks. Persistent if statedb set. +revoked = LimitedSet(maxlen=REVOKES_MAX, expires=REVOKE_EXPIRES) + +#: Update global state when a task has been reserved. +task_reserved = reserved_requests.add + +should_stop = False +should_terminate = False + + +def reset_state(): + reserved_requests.clear() + active_requests.clear() + total_count.clear() + all_total_count[:] = [0] + revoked.clear() + + +def maybe_shutdown(): + if should_stop: + raise WorkerShutdown() + elif should_terminate: + raise WorkerTerminate() + + +def task_accepted(request, _all_total_count=all_total_count): + """Updates global state when a task has been accepted.""" + active_requests.add(request) + total_count[request.name] += 1 + all_total_count[0] += 1 + + +def task_ready(request): + """Updates global state when a task is ready.""" + active_requests.discard(request) + reserved_requests.discard(request) + + +C_BENCH = os.environ.get('C_BENCH') or os.environ.get('CELERY_BENCH') +C_BENCH_EVERY = int(os.environ.get('C_BENCH_EVERY') or + os.environ.get('CELERY_BENCH_EVERY') or 1000) +if C_BENCH: # pragma: no cover + import atexit + + from billiard import current_process + from celery.five import monotonic + from celery.utils.debug import memdump, sample_mem + + all_count = 0 + bench_first = None + bench_start = None + bench_last = None + bench_every = C_BENCH_EVERY + bench_sample = [] + __reserved = task_reserved + __ready = task_ready + + if current_process()._name == 'MainProcess': + @atexit.register + def on_shutdown(): + if bench_first is not None and bench_last is not None: + print('- Time spent in benchmark: {0!r}'.format( + bench_last - bench_first)) + print('- Avg: {0}'.format( + sum(bench_sample) / len(bench_sample))) + memdump() + + def task_reserved(request): # noqa + global bench_start + global bench_first + now = None + if bench_start is None: + bench_start = now = monotonic() + if bench_first is None: + bench_first = now + + return __reserved(request) + + def task_ready(request): # noqa + global all_count + global bench_start + global bench_last + all_count += 1 + if not all_count % bench_every: + now = monotonic() + diff = now - bench_start + print('- Time spent processing {0} tasks (since first ' + 'task received): ~{1:.4f}s\n'.format(bench_every, diff)) + sys.stdout.flush() + bench_start = bench_last = now + bench_sample.append(diff) + sample_mem() + return __ready(request) + + +class Persistent(object): + """This is the persistent data stored by the worker when + :option:`--statedb` is enabled. + + It currently only stores revoked task id's. 
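+    The ids are pickled, zlib-compressed and written to a :mod:`shelve` database file.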
+ + """ + storage = shelve + protocol = pickle_protocol + compress = zlib.compress + decompress = zlib.decompress + _is_open = False + + def __init__(self, state, filename, clock=None): + self.state = state + self.filename = filename + self.clock = clock + self.merge() + + def open(self): + return self.storage.open( + self.filename, protocol=self.protocol, writeback=True, + ) + + def merge(self): + self._merge_with(self.db) + + def sync(self): + self._sync_with(self.db) + self.db.sync() + + def close(self): + if self._is_open: + self.db.close() + self._is_open = False + + def save(self): + self.sync() + self.close() + + def _merge_with(self, d): + self._merge_revoked(d) + self._merge_clock(d) + return d + + def _sync_with(self, d): + self._revoked_tasks.purge() + d.update( + __proto__=3, + zrevoked=self.compress(self._dumps(self._revoked_tasks)), + clock=self.clock.forward() if self.clock else 0, + ) + return d + + def _merge_clock(self, d): + if self.clock: + d['clock'] = self.clock.adjust(d.get('clock') or 0) + + def _merge_revoked(self, d): + try: + self._merge_revoked_v3(d['zrevoked']) + except KeyError: + try: + self._merge_revoked_v2(d.pop('revoked')) + except KeyError: + pass + # purge expired items at boot + self._revoked_tasks.purge() + + def _merge_revoked_v3(self, zrevoked): + if zrevoked: + self._revoked_tasks.update(pickle.loads(self.decompress(zrevoked))) + + def _merge_revoked_v2(self, saved): + if not isinstance(saved, LimitedSet): + # (pre 3.0.18) used to be stored as a dict + return self._merge_revoked_v1(saved) + self._revoked_tasks.update(saved) + + def _merge_revoked_v1(self, saved): + add = self._revoked_tasks.add + for item in saved: + add(item) + + def _dumps(self, obj): + return pickle.dumps(obj, protocol=self.protocol) + + @property + def _revoked_tasks(self): + return self.state.revoked + + @cached_property + def db(self): + self._is_open = True + return self.open() diff --git a/thesisenv/lib/python3.6/site-packages/celery/worker/strategy.py b/thesisenv/lib/python3.6/site-packages/celery/worker/strategy.py new file mode 100644 index 0000000..da69b43 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/celery/worker/strategy.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +""" + celery.worker.strategy + ~~~~~~~~~~~~~~~~~~~~~~ + + Task execution strategy (optimization). 
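+    The default strategy wraps each incoming task message in a Request,
+    applies ETA scheduling and rate limits, and hands it to the worker pool.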
+ +""" +from __future__ import absolute_import + +import logging + +from kombu.async.timer import to_timestamp +from kombu.utils.encoding import safe_repr + +from celery.utils.log import get_logger +from celery.utils.timeutils import timezone + +from .job import Request +from .state import task_reserved + +__all__ = ['default'] + +logger = get_logger(__name__) + + +def default(task, app, consumer, + info=logger.info, error=logger.error, task_reserved=task_reserved, + to_system_tz=timezone.to_system): + Req = Request + hostname = consumer.hostname + connection_errors = consumer.connection_errors + _does_info = logger.isEnabledFor(logging.INFO) + + # task event related + # (optimized to avoid calling request.send_event) + eventer = consumer.event_dispatcher + events = eventer and eventer.enabled + send_event = eventer.send + task_sends_events = events and task.send_events + + call_at = consumer.timer.call_at + apply_eta_task = consumer.apply_eta_task + rate_limits_enabled = not consumer.disable_rate_limits + get_bucket = consumer.task_buckets.__getitem__ + handle = consumer.on_task_request + limit_task = consumer._limit_task + + def task_message_handler(message, body, ack, reject, callbacks, + to_timestamp=to_timestamp): + req = Req(body, on_ack=ack, on_reject=reject, + app=app, hostname=hostname, + eventer=eventer, task=task, + connection_errors=connection_errors, + message=message) + if req.revoked(): + return + + if _does_info: + info('Received task: %s', req) + + if task_sends_events: + send_event( + 'task-received', + uuid=req.id, name=req.name, + args=safe_repr(req.args), kwargs=safe_repr(req.kwargs), + retries=req.request_dict.get('retries', 0), + eta=req.eta and req.eta.isoformat(), + expires=req.expires and req.expires.isoformat(), + ) + + if req.eta: + try: + if req.utc: + eta = to_timestamp(to_system_tz(req.eta)) + else: + eta = to_timestamp(req.eta, timezone.local) + except OverflowError as exc: + error("Couldn't convert eta %s to timestamp: %r. 
Task: %r", + req.eta, exc, req.info(safe=True), exc_info=True) + req.acknowledge() + else: + consumer.qos.increment_eventually() + call_at(eta, apply_eta_task, (req, ), priority=6) + else: + if rate_limits_enabled: + bucket = get_bucket(task.name) + if bucket: + return limit_task(req, bucket, 1) + task_reserved(req) + if callbacks: + [callback() for callback in callbacks] + handle(req) + + return task_message_handler diff --git a/thesisenv/lib/python3.6/site-packages/croniter-0.3.25.dist-info/INSTALLER b/thesisenv/lib/python3.6/site-packages/croniter-0.3.25.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/croniter-0.3.25.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/thesisenv/lib/python3.6/site-packages/croniter-0.3.25.dist-info/METADATA b/thesisenv/lib/python3.6/site-packages/croniter-0.3.25.dist-info/METADATA new file mode 100644 index 0000000..f22a920 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/croniter-0.3.25.dist-info/METADATA @@ -0,0 +1,324 @@ +Metadata-Version: 2.1 +Name: croniter +Version: 0.3.25 +Summary: croniter provides iteration for datetime object with cron like format +Home-page: http://github.com/kiorky/croniter +Author: Matsumoto Taichi, kiorky +Author-email: taichino@gmail.com, kiorky@cryptelium.net +License: MIT License +Keywords: datetime,iterator,cron +Platform: UNKNOWN +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: POSIX +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Requires-Python: >=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* +Requires-Dist: python-dateutil + +Introduction +============ + +.. contents:: + + +croniter provides iteration for the datetime object with a cron like format. + +:: + + _ _ + ___ _ __ ___ _ __ (_) |_ ___ _ __ + / __| '__/ _ \| '_ \| | __/ _ \ '__| + | (__| | | (_) | | | | | || __/ | + \___|_| \___/|_| |_|_|\__\___|_| + + +Website: https://github.com/kiorky/croniter + +Travis badge +============= +.. 
image:: https://travis-ci.org/kiorky/croniter.svg?branch=master + :target: https://travis-ci.org/kiorky/croniter + +Usage +============ + +A simple example:: + + >>> from croniter import croniter + >>> from datetime import datetime + >>> base = datetime(2010, 1, 25, 4, 46) + >>> iter = croniter('*/5 * * * *', base) # every 5 minutes + >>> print(iter.get_next(datetime)) # 2010-01-25 04:50:00 + >>> print(iter.get_next(datetime)) # 2010-01-25 04:55:00 + >>> print(iter.get_next(datetime)) # 2010-01-25 05:00:00 + >>> + >>> iter = croniter('2 4 * * mon,fri', base) # 04:02 on every Monday and Friday + >>> print(iter.get_next(datetime)) # 2010-01-26 04:02:00 + >>> print(iter.get_next(datetime)) # 2010-01-30 04:02:00 + >>> print(iter.get_next(datetime)) # 2010-02-02 04:02:00 + >>> + >>> iter = croniter('2 4 1 * wed', base) # 04:02 on every Wednesday OR on 1st day of month + >>> print(iter.get_next(datetime)) # 2010-01-27 04:02:00 + >>> print(iter.get_next(datetime)) # 2010-02-01 04:02:00 + >>> print(iter.get_next(datetime)) # 2010-02-03 04:02:00 + >>> + >>> iter = croniter('2 4 1 * wed', base, day_or=False) # 04:02 on every 1st day of the month if it is a Wednesday + >>> print(iter.get_next(datetime)) # 2010-09-01 04:02:00 + >>> print(iter.get_next(datetime)) # 2010-12-01 04:02:00 + >>> print(iter.get_next(datetime)) # 2011-06-01 04:02:00 + >>> iter = croniter('0 0 * * sat#1,sun#2', base) + >>> print(iter.get_next(datetime)) # datetime.datetime(2010, 2, 6, 0, 0) + +All you need to know is how to use the constructor and the ``get_next`` +method, the signature of these methods are listed below:: + + >>> def __init__(self, cron_format, start_time=time.time(), day_or=True) + +croniter iterates along with ``cron_format`` from ``start_time``. +``cron_format`` is **min hour day month day_of_week**, you can refer to +http://en.wikipedia.org/wiki/Cron for more details. The ``day_or`` +switch is used to control how croniter handles **day** and **day_of_week** +entries. Default option is the cron behaviour, which connects those +values using **OR**. If the switch is set to False, the values are connected +using **AND**. This behaves like fcron and enables you to e.g. define a job that +executes each 2nd friday of a month by setting the days of month and the +weekday. +:: + + >>> def get_next(self, ret_type=float) + +get_next calculates the next value according to the cron expression and +returns an object of type ``ret_type``. ``ret_type`` should be a ``float`` or a +``datetime`` object. + +Supported added for ``get_prev`` method. (>= 0.2.0):: + + >>> base = datetime(2010, 8, 25) + >>> itr = croniter('0 0 1 * *', base) + >>> print(itr.get_prev(datetime)) # 2010-08-01 00:00:00 + >>> print(itr.get_prev(datetime)) # 2010-07-01 00:00:00 + >>> print(itr.get_prev(datetime)) # 2010-06-01 00:00:00 + +You can validate your crons using ``is_valid`` class method. (>= 0.3.18):: + + >>> croniter.is_valid('0 0 1 * *') # True + >>> croniter.is_valid('0 wrong_value 1 * *') # False + +About DST +========= +Be sure to init your croniter instance with a TZ aware datetime for this to work !:: + + >>> local_date = tz.localize(datetime(2017, 3, 26)) + >>> val = croniter('0 0 * * *', local_date).get_next(datetime) + +Develop this package +==================== + +:: + + git clone https://github.com/kiorky/croniter.git + cd croniter + virtualenv --no-site-packages venv + . 
venv/bin/activate + pip install --upgrade -r requirements/test.txt + py.test src + + +Make a new release +==================== +We use zest.fullreleaser, a great release infrastructure. + +Do and follow these instructions +:: + + . venv/bin/activate + pip install --upgrade -r requirements/release.txt + fullrelease + + +Contributors +=============== +Thanks to all who have contributed to this project! +If you have contributed and your name is not listed below please let me know. + + - mrmachine + - Hinnack + - shazow + - kiorky + - jlsandell + - mag009 + - djmitche + - GreatCombinator + - chris-baynes + - ipartola + - yuzawa-san + + +Changelog +============== + +0.3.25 (2018-08-07) +------------------- +- Pypî hygiene + [hugovk] + + +0.3.24 (2018-06-20) +------------------- +- fix `#107 `_: microsecond threshold + [kiorky] + + +0.3.23 (2018-05-23) +------------------- + +- fix `get_next` while perserving the fix of `get_prev` in 7661c2aaa + [Avikam Agur ] + + +0.3.22 (2018-05-16) +------------------- +- Don't count previous minute if now is dynamic + If the code is triggered from 5-asterisk based cron + `get_prev` based on `datetime.now()` is expected to return + current cron iteration and not previous execution. + [Igor Khrol ] + +0.3.20 (2017-11-06) +------------------- + +- More DST fixes + [Kevin Rose ] + + +0.3.19 (2017-08-31) +------------------- + +- fix #87: backward dst changes + [kiorky] + + +0.3.18 (2017-08-31) +------------------- + +- Add is valid method, refactor errors + [otherpirate, Mauro Murari ] + + +0.3.17 (2017-05-22) +------------------- +- DOW occurence sharp style support. + [kiorky, Kengo Seki ] + + +0.3.16 (2017-03-15) +------------------- + +- Better test suite [mrcrilly@github] +- DST support [kiorky] + +0.3.15 (2017-02-16) +------------------- + +- fix bug around multiple conditions and range_val in + _get_prev_nearest_diff. + [abeja-yuki@github] + +0.3.14 (2017-01-25) +------------------- + +- issue #69: added day_or option to change behavior when day-of-month and + day-of-week is given + [Andreas Vogl ] + + + +0.3.13 (2016-11-01) +------------------- + +- `Real fix for #34 `_ + [kiorky@github] +- `Modernize test infra `_ + [kiorky@github] +- `Release as a universal wheel `_ + [adamchainz@github] +- `Raise ValueError on negative numbers `_ + [josegonzalez@github] +- `Compare types using "issubclass" instead of exact match `_ + [darkk@github] +- `Implement step cron with a variable base `_ + [josegonzalez@github] + +0.3.12 (2016-03-10) +------------------- +- support setting ret_type in __init__ [Brent Tubbs ] + +0.3.11 (2016-01-13) +------------------- + +- Bug fix: The get_prev API crashed when last day of month token was used. Some + essential logic was missing. + [Iddo Aviram ] + + +0.3.10 (2015-11-29) +------------------- + +- The fuctionality of 'l' as day of month was broken, since the month variable + was not properly updated + [Iddo Aviram ] + +0.3.9 (2015-11-19) +------------------ + +- Don't use datetime functions python 2.6 doesn't support + [petervtzand] + +0.3.8 (2015-06-23) +------------------ +- Truncate microseconds by setting to 0 + [Corey Wright] + + +0.3.7 (2015-06-01) +------------------ + +- converting sun in range sun-thu transforms to int 0 which is + recognized as empty string; the solution was to convert sun to string "0" + +0.3.6 (2015-05-29) +------------------ + +- Fix default behavior when no start_time given + Default value for `start_time` parameter is calculated at module init time rather than call time. 
+- Fix timezone support and stop depending on the system time zone + + + +0.3.5 (2014-08-01) +------------------ + +- support for 'l' (last day of month) + + +0.3.4 (2014-01-30) +------------------ + +- Python 3 compat +- QA Relase + + +0.3.3 (2012-09-29) +------------------ +- proper packaging + + + diff --git a/thesisenv/lib/python3.6/site-packages/croniter-0.3.25.dist-info/RECORD b/thesisenv/lib/python3.6/site-packages/croniter-0.3.25.dist-info/RECORD new file mode 100644 index 0000000..94eb29b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/croniter-0.3.25.dist-info/RECORD @@ -0,0 +1,17 @@ +croniter-0.3.25.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +croniter-0.3.25.dist-info/METADATA,sha256=b471DZomHKt0E_mFldIXQdpHaVGfcrqcGUFsN6s6eMQ,9125 +croniter-0.3.25.dist-info/RECORD,, +croniter-0.3.25.dist-info/WHEEL,sha256=gduuPyBvFJQSQ0zdyxF7k0zynDXbIbvg5ZBHoXum5uk,110 +croniter-0.3.25.dist-info/top_level.txt,sha256=7bhikJW4KtA3pcMcyQwozsuHNwwjqAbDEAFbMN41UaU,9 +croniter/__init__.py,sha256=TqYMvmKglBTOk8wH46D0ZaCIx-6l_8DMihnTJqp6kHM,252 +croniter/__pycache__/__init__.cpython-36.pyc,, +croniter/__pycache__/croniter.cpython-36.pyc,, +croniter/croniter.py,sha256=6H5zvprZszU8PrllqUM0cqz_0RcSa2967Z6stsRXm9w,20239 +croniter/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +croniter/tests/__pycache__/__init__.cpython-36.pyc,, +croniter/tests/__pycache__/base.cpython-36.pyc,, +croniter/tests/__pycache__/test_croniter.cpython-36.pyc,, +croniter/tests/__pycache__/test_speed.cpython-36.pyc,, +croniter/tests/base.py,sha256=FSNFhXl40vetU8uElb4T_nOyQHF4UIW7XBH-m1zb1T0,282 +croniter/tests/test_croniter.py,sha256=Vrtsq9d_lpN0ZhPjqk4oYxbXB580W36jpOTNYBaRaRc,30947 +croniter/tests/test_speed.py,sha256=2pXP2N5GTWZ6TaXReO4tpyovyKQDCU4RDfll2Z-HC9s,5715 diff --git a/thesisenv/lib/python3.6/site-packages/pip-18.0.dist-info/WHEEL b/thesisenv/lib/python3.6/site-packages/croniter-0.3.25.dist-info/WHEEL similarity index 100% rename from thesisenv/lib/python3.6/site-packages/pip-18.0.dist-info/WHEEL rename to thesisenv/lib/python3.6/site-packages/croniter-0.3.25.dist-info/WHEEL diff --git a/thesisenv/lib/python3.6/site-packages/croniter-0.3.25.dist-info/top_level.txt b/thesisenv/lib/python3.6/site-packages/croniter-0.3.25.dist-info/top_level.txt new file mode 100644 index 0000000..ead0958 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/croniter-0.3.25.dist-info/top_level.txt @@ -0,0 +1 @@ +croniter diff --git a/thesisenv/lib/python3.6/site-packages/croniter/__init__.py b/thesisenv/lib/python3.6/site-packages/croniter/__init__.py new file mode 100644 index 0000000..ead8bb8 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/croniter/__init__.py @@ -0,0 +1,9 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .croniter import ( + croniter, + CroniterBadDateError, # noqa + CroniterBadCronError, # noqa + CroniterNotAlphaError # noqa +) # noqa +croniter.__name__ # make flake8 happy diff --git a/thesisenv/lib/python3.6/site-packages/croniter/croniter.py b/thesisenv/lib/python3.6/site-packages/croniter/croniter.py new file mode 100644 index 0000000..3398227 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/croniter/croniter.py @@ -0,0 +1,564 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from __future__ import absolute_import, print_function +import re +from time import time +import datetime +from dateutil.relativedelta import relativedelta +from dateutil.tz import tzutc +import calendar + +step_search_re = 
re.compile(r'^([^-]+)-([^-/]+)(/(.*))?$') +search_re = re.compile(r'^([^-]+)-([^-/]+)(/(.*))?$') +only_int_re = re.compile(r'^\d+$') +any_int_re = re.compile(r'^\d+') +star_or_int_re = re.compile(r'^(\d+|\*)$') +VALID_LEN_EXPRESSION = [5, 6] + + +class CroniterError(ValueError): + pass + + +class CroniterBadCronError(CroniterError): + pass + + +class CroniterBadDateError(CroniterError): + pass + + +class CroniterNotAlphaError(CroniterError): + pass + + +class croniter(object): + MONTHS_IN_YEAR = 12 + RANGES = ( + (0, 59), + (0, 23), + (1, 31), + (1, 12), + (0, 6), + (0, 59) + ) + DAYS = ( + 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 + ) + + ALPHACONV = ( + {}, + {}, + {"l": "l"}, + {'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6, + 'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12}, + {'sun': 0, 'mon': 1, 'tue': 2, 'wed': 3, 'thu': 4, 'fri': 5, 'sat': 6}, + {} + ) + + LOWMAP = ( + {}, + {}, + {0: 1}, + {0: 1}, + {7: 0}, + {}, + ) + + bad_length = 'Exactly 5 or 6 columns has to be specified for iterator' \ + 'expression.' + + def __init__(self, expr_format, start_time=None, ret_type=float, + day_or=True): + self._ret_type = ret_type + self._day_or = day_or + + if start_time is None: + start_time = time() + + self.tzinfo = None + if isinstance(start_time, datetime.datetime): + self.tzinfo = start_time.tzinfo + # milliseconds/microseconds rounds + if start_time.microsecond: + start_time = start_time + relativedelta(seconds=1) + start_time = self._datetime_to_timestamp(start_time) + + self.start_time = start_time + self.dst_start_time = start_time + self.cur = start_time + + self.expanded, self.nth_weekday_of_month = self.expand(expr_format) + + @classmethod + def _alphaconv(cls, index, key, expressions): + try: + return cls.ALPHACONV[index][key.lower()] + except KeyError: + raise CroniterNotAlphaError( + "[{0}] is not acceptable".format(" ".join(expressions))) + + def get_next(self, ret_type=None): + return self._get_next(ret_type or self._ret_type, is_prev=False) + + def get_prev(self, ret_type=None): + return self._get_next(ret_type or self._ret_type, is_prev=True) + + def get_current(self, ret_type=None): + ret_type = ret_type or self._ret_type + if issubclass(ret_type, datetime.datetime): + return self._timestamp_to_datetime(self.cur) + return self.cur + + @classmethod + def _datetime_to_timestamp(cls, d): + """ + Converts a `datetime` object `d` into a UNIX timestamp. + """ + if d.tzinfo is not None: + d = d.replace(tzinfo=None) - d.utcoffset() + + return cls._timedelta_to_seconds(d - datetime.datetime(1970, 1, 1)) + + def _timestamp_to_datetime(self, timestamp): + """ + Converts a UNIX timestamp `timestamp` into a `datetime` object. + """ + result = datetime.datetime.utcfromtimestamp(timestamp) + if self.tzinfo: + result = result.replace(tzinfo=tzutc()).astimezone(self.tzinfo) + + return result + + @classmethod + def _timedelta_to_seconds(cls, td): + """ + Converts a 'datetime.timedelta' object `td` into seconds contained in + the duration. + Note: We cannot use `timedelta.total_seconds()` because this is not + supported by Python 2.6. + """ + return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) \ + / 10**6 + + # iterator protocol, to enable direct use of croniter + # objects in a loop, like "for dt in croniter('5 0 * * *'): ..." 
+ # or for combining multiple croniters into single + # dates feed using 'itertools' module + def __iter__(self): + return self + __next__ = next = get_next + + def all_next(self, ret_type=None): + '''Generator of all consecutive dates. Can be used instead of + implicit call to __iter__, whenever non-default + 'ret_type' has to be specified. + ''' + while True: + yield self._get_next(ret_type or self._ret_type, is_prev=False) + + def all_prev(self, ret_type=None): + '''Generator of all previous dates.''' + while True: + yield self._get_next(ret_type or self._ret_type, is_prev=True) + + iter = all_next # alias, you can call .iter() instead of .all_next() + + def _get_next(self, ret_type=None, is_prev=False): + expanded = self.expanded[:] + nth_weekday_of_month = self.nth_weekday_of_month.copy() + + ret_type = ret_type or self._ret_type + + if not issubclass(ret_type, (float, datetime.datetime)): + raise TypeError("Invalid ret_type, only 'float' or 'datetime' " + "is acceptable.") + + # exception to support day of month and day of week as defined in cron + if (expanded[2][0] != '*' and expanded[4][0] != '*') and self._day_or: + bak = expanded[4] + expanded[4] = ['*'] + t1 = self._calc(self.cur, expanded, nth_weekday_of_month, is_prev) + expanded[4] = bak + expanded[2] = ['*'] + + t2 = self._calc(self.cur, expanded, nth_weekday_of_month, is_prev) + if not is_prev: + result = t1 if t1 < t2 else t2 + else: + result = t1 if t1 > t2 else t2 + else: + result = self._calc(self.cur, expanded, + nth_weekday_of_month, is_prev) + + # DST Handling for cron job spanning accross days + dtstarttime = self._timestamp_to_datetime(self.dst_start_time) + dtstarttime_utcoffset = ( + dtstarttime.utcoffset() or datetime.timedelta(0)) + dtresult = self._timestamp_to_datetime(result) + lag = lag_hours = 0 + # do we trigger DST on next crontab (handle backward changes) + dtresult_utcoffset = dtstarttime_utcoffset + if dtresult and self.tzinfo: + dtresult_utcoffset = dtresult.utcoffset() + lag_hours = ( + self._timedelta_to_seconds(dtresult - dtstarttime) / (60*60) + ) + lag = self._timedelta_to_seconds( + dtresult_utcoffset - dtstarttime_utcoffset + ) + hours_before_midnight = 24 - dtstarttime.hour + if dtresult_utcoffset != dtstarttime_utcoffset: + if ((lag > 0 and lag_hours >= hours_before_midnight) + or (lag < 0 and + ((3600*lag_hours+abs(lag)) >= hours_before_midnight*3600)) + ): + dtresult = dtresult - datetime.timedelta(seconds=lag) + result = self._datetime_to_timestamp(dtresult) + self.dst_start_time = result + self.cur = result + if issubclass(ret_type, datetime.datetime): + result = dtresult + return result + + def _calc(self, now, expanded, nth_weekday_of_month, is_prev): + if is_prev: + nearest_diff_method = self._get_prev_nearest_diff + sign = -1 + offset = (len(expanded) == 6 or now % 60 > 0) and 1 or 60 + else: + nearest_diff_method = self._get_next_nearest_diff + sign = 1 + offset = (len(expanded) == 6) and 1 or 60 + + dst = now = self._timestamp_to_datetime(now + sign * offset) + + month, year = dst.month, dst.year + current_year = now.year + DAYS = self.DAYS + + def proc_month(d): + if expanded[3][0] != '*': + diff_month = nearest_diff_method( + d.month, expanded[3], self.MONTHS_IN_YEAR) + days = DAYS[month - 1] + if month == 2 and self.is_leap(year) is True: + days += 1 + + reset_day = 1 + + if diff_month is not None and diff_month != 0: + if is_prev: + d += relativedelta(months=diff_month) + reset_day = DAYS[d.month - 1] + d += relativedelta( + day=reset_day, hour=23, minute=59, second=59) + 
else: + d += relativedelta(months=diff_month, day=reset_day, + hour=0, minute=0, second=0) + return True, d + return False, d + + def proc_day_of_month(d): + if expanded[2][0] != '*': + days = DAYS[month - 1] + if month == 2 and self.is_leap(year) is True: + days += 1 + if 'l' in expanded[2] and days == d.day: + return False, d + + if is_prev: + days_in_prev_month = DAYS[ + (month - 2) % self.MONTHS_IN_YEAR] + diff_day = nearest_diff_method( + d.day, expanded[2], days_in_prev_month) + else: + diff_day = nearest_diff_method(d.day, expanded[2], days) + + if diff_day is not None and diff_day != 0: + if is_prev: + d += relativedelta( + days=diff_day, hour=23, minute=59, second=59) + else: + d += relativedelta( + days=diff_day, hour=0, minute=0, second=0) + return True, d + return False, d + + def proc_day_of_week(d): + if expanded[4][0] != '*': + diff_day_of_week = nearest_diff_method( + d.isoweekday() % 7, expanded[4], 7) + if diff_day_of_week is not None and diff_day_of_week != 0: + if is_prev: + d += relativedelta(days=diff_day_of_week, + hour=23, minute=59, second=59) + else: + d += relativedelta(days=diff_day_of_week, + hour=0, minute=0, second=0) + return True, d + return False, d + + def proc_day_of_week_nth(d): + if '*' in nth_weekday_of_month: + s = nth_weekday_of_month['*'] + for i in range(0, 7): + if i in nth_weekday_of_month: + nth_weekday_of_month[i].update(s) + else: + nth_weekday_of_month[i] = s + del nth_weekday_of_month['*'] + + candidates = [] + for wday, nth in nth_weekday_of_month.items(): + w = (wday + 6) % 7 + c = calendar.Calendar(w).monthdayscalendar(d.year, d.month) + if c[0][0] == 0: c.pop(0) + for n in nth: + if len(c) < n: + continue + candidate = c[n - 1][0] + if ( + (is_prev and candidate <= d.day) or + (not is_prev and d.day <= candidate) + ): + candidates.append(candidate) + + if not candidates: + if is_prev: + d += relativedelta(days=-d.day, + hour=23, minute=59, second=59) + else: + days = DAYS[month - 1] + if month == 2 and self.is_leap(year) is True: + days += 1 + d += relativedelta(days=(days - d.day + 1), + hour=0, minute=0, second=0) + return True, d + + candidates.sort() + diff_day = (candidates[-1] if is_prev else candidates[0]) - d.day + if diff_day != 0: + if is_prev: + d += relativedelta(days=diff_day, + hour=23, minute=59, second=59) + else: + d += relativedelta(days=diff_day, + hour=0, minute=0, second=0) + return True, d + return False, d + + def proc_hour(d): + if expanded[1][0] != '*': + diff_hour = nearest_diff_method(d.hour, expanded[1], 24) + if diff_hour is not None and diff_hour != 0: + if is_prev: + d += relativedelta( + hours=diff_hour, minute=59, second=59) + else: + d += relativedelta(hours=diff_hour, minute=0, second=0) + return True, d + return False, d + + def proc_minute(d): + if expanded[0][0] != '*': + diff_min = nearest_diff_method(d.minute, expanded[0], 60) + if diff_min is not None and diff_min != 0: + if is_prev: + d += relativedelta(minutes=diff_min, second=59) + else: + d += relativedelta(minutes=diff_min, second=0) + return True, d + return False, d + + def proc_second(d): + if len(expanded) == 6: + if expanded[5][0] != '*': + diff_sec = nearest_diff_method(d.second, expanded[5], 60) + if diff_sec is not None and diff_sec != 0: + d += relativedelta(seconds=diff_sec) + return True, d + else: + d += relativedelta(second=0) + return False, d + + procs = [proc_month, + proc_day_of_month, + (proc_day_of_week_nth if nth_weekday_of_month + else proc_day_of_week), + proc_hour, + proc_minute, + proc_second] + + while abs(year - 
current_year) <= 1: + next = False + for proc in procs: + (changed, dst) = proc(dst) + if changed: + month, year = dst.month, dst.year + next = True + break + if next: + continue + return self._datetime_to_timestamp(dst.replace(microsecond=0)) + + if is_prev: + raise CroniterBadDateError("failed to find prev date") + raise CroniterBadDateError("failed to find next date") + + def _get_next_nearest(self, x, to_check): + small = [item for item in to_check if item < x] + large = [item for item in to_check if item >= x] + large.extend(small) + return large[0] + + def _get_prev_nearest(self, x, to_check): + small = [item for item in to_check if item <= x] + large = [item for item in to_check if item > x] + small.reverse() + large.reverse() + small.extend(large) + return small[0] + + def _get_next_nearest_diff(self, x, to_check, range_val): + for i, d in enumerate(to_check): + if d == "l": + # if 'l' then it is the last day of month + # => its value of range_val + d = range_val + if d >= x: + return d - x + return to_check[0] - x + range_val + + def _get_prev_nearest_diff(self, x, to_check, range_val): + candidates = to_check[:] + candidates.reverse() + for d in candidates: + if d != 'l' and d <= x: + return d - x + if 'l' in candidates: + return -x + candidate = candidates[0] + for c in candidates: + # fixed: c < range_val + # this code will reject all 31 day of month, 12 month, 59 second, + # 23 hour and so on. + # if candidates has just a element, this will not harmful. + # but candidates have multiple elements, then values equal to + # range_val will rejected. + if c <= range_val: + candidate = c + break + + return (candidate - x - range_val) + + def is_leap(self, year): + if year % 400 == 0 or (year % 4 == 0 and year % 100 != 0): + return True + else: + return False + + @classmethod + def expand(cls, expr_format): + expressions = expr_format.split() + + if len(expressions) not in VALID_LEN_EXPRESSION: + raise CroniterBadCronError(cls.bad_length) + + expanded = [] + nth_weekday_of_month = {} + + for i, expr in enumerate(expressions): + e_list = expr.split(',') + res = [] + + while len(e_list) > 0: + e = e_list.pop() + + if i == 4: + e, sep, nth = str(e).partition('#') + if nth and not re.match(r'[1-5]', nth): + raise CroniterBadDateError( + "[{0}] is not acceptable".format(expr_format)) + + t = re.sub(r'^\*(\/.+)$', r'%d-%d\1' % ( + cls.RANGES[i][0], + cls.RANGES[i][1]), + str(e)) + m = search_re.search(t) + + if not m: + t = re.sub(r'^(.+)\/(.+)$', r'\1-%d/\2' % ( + cls.RANGES[i][1]), + str(e)) + m = step_search_re.search(t) + + if m: + (low, high, step) = m.group(1), m.group(2), m.group(4) or 1 + + if not any_int_re.search(low): + low = "{0}".format(cls._alphaconv(i, low, expressions)) + + if not any_int_re.search(high): + high = "{0}".format(cls._alphaconv(i, high, expressions)) + + if ( + not low or not high or int(low) > int(high) + or not only_int_re.search(str(step)) + ): + raise CroniterBadDateError( + "[{0}] is not acceptable".format(expr_format)) + + low, high, step = map(int, [low, high, step]) + rng = range(low, high + 1, step) + e_list += (["{0}#{1}".format(item, nth) for item in rng] + if i == 4 and nth else rng) + else: + if t.startswith('-'): + raise CroniterBadCronError( + "[{0}] is not acceptable,\ + negative numbers not allowed".format( + expr_format)) + if not star_or_int_re.search(t): + t = cls._alphaconv(i, t, expressions) + + try: + t = int(t) + except: + pass + + if t in cls.LOWMAP[i]: + t = cls.LOWMAP[i][t] + + if ( + t not in ["*", "l"] + and (int(t) < 
cls.RANGES[i][0] or + int(t) > cls.RANGES[i][1]) + ): + raise CroniterBadCronError( + "[{0}] is not acceptable, out of range".format( + expr_format)) + + res.append(t) + + if i == 4 and nth: + if t not in nth_weekday_of_month: + nth_weekday_of_month[t] = set() + nth_weekday_of_month[t].add(int(nth)) + + res.sort() + expanded.append(['*'] if (len(res) == 1 + and res[0] == '*') + else res) + + return expanded, nth_weekday_of_month + + @classmethod + def is_valid(cls, expression): + try: + cls.expand(expression) + except CroniterError: + return False + else: + return True diff --git a/thesisenv/lib/python3.6/site-packages/croniter/tests/__init__.py b/thesisenv/lib/python3.6/site-packages/croniter/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/croniter/tests/base.py b/thesisenv/lib/python3.6/site-packages/croniter/tests/base.py new file mode 100644 index 0000000..59c49f6 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/croniter/tests/base.py @@ -0,0 +1,13 @@ +try: + import unittest2 as unittest +except ImportError: + import unittest + + +class TestCase(unittest.TestCase): + ''' + We use this base class for all the tests in this package. + If necessary, we can put common utility or setup code in here. + ''' + +# vim:set ft=python: diff --git a/thesisenv/lib/python3.6/site-packages/croniter/tests/test_croniter.py b/thesisenv/lib/python3.6/site-packages/croniter/tests/test_croniter.py new file mode 100644 index 0000000..2d23525 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/croniter/tests/test_croniter.py @@ -0,0 +1,837 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import unittest +from datetime import datetime +from time import sleep +import pytz +from croniter import croniter, CroniterBadDateError, CroniterBadCronError, CroniterNotAlphaError +from croniter.tests import base + + +class CroniterTest(base.TestCase): + + def testSecond(self): + base = datetime(2012, 4, 6, 13, 26, 10) + itr = croniter('*/1 * * * * *', base) + n1 = itr.get_next(datetime) + self.assertEqual(base.year, n1.year) + self.assertEqual(base.month, n1.month) + self.assertEqual(base.day, n1.day) + self.assertEqual(base.hour, n1.hour) + self.assertEqual(base.minute, n1.minute) + self.assertEqual(base.second + 1, n1.second) + + def testSecondRepeat(self): + base = datetime(2012, 4, 6, 13, 26, 36) + itr = croniter('* * * * * */15', base) + n1 = itr.get_next(datetime) + n2 = itr.get_next(datetime) + n3 = itr.get_next(datetime) + self.assertEqual(base.year, n1.year) + self.assertEqual(base.month, n1.month) + self.assertEqual(base.day, n1.day) + self.assertEqual(base.hour, n1.hour) + self.assertEqual(base.minute, n1.minute) + self.assertEqual(45, n1.second) + self.assertEqual(base.year, n2.year) + self.assertEqual(base.month, n2.month) + self.assertEqual(base.day, n2.day) + self.assertEqual(base.hour, n2.hour) + self.assertEqual(base.minute + 1, n2.minute) + self.assertEqual(0, n2.second) + self.assertEqual(base.year, n3.year) + self.assertEqual(base.month, n3.month) + self.assertEqual(base.day, n3.day) + self.assertEqual(base.hour, n3.hour) + self.assertEqual(base.minute + 1, n3.minute) + self.assertEqual(15, n3.second) + + def testMinute(self): + # minute asterisk + base = datetime(2010, 1, 23, 12, 18) + itr = croniter('*/1 * * * *', base) + n1 = itr.get_next(datetime) # 19 + self.assertEqual(base.year, n1.year) + self.assertEqual(base.month, n1.month) + self.assertEqual(base.day, n1.day) + self.assertEqual(base.hour, n1.hour) + 
self.assertEqual(base.minute, n1.minute - 1) + for i in range(39): # ~ 58 + itr.get_next() + n2 = itr.get_next(datetime) + self.assertEqual(n2.minute, 59) + n3 = itr.get_next(datetime) + self.assertEqual(n3.minute, 0) + self.assertEqual(n3.hour, 13) + + itr = croniter('*/5 * * * *', base) + n4 = itr.get_next(datetime) + self.assertEqual(n4.minute, 20) + for i in range(6): + itr.get_next() + n5 = itr.get_next(datetime) + self.assertEqual(n5.minute, 55) + n6 = itr.get_next(datetime) + self.assertEqual(n6.minute, 0) + self.assertEqual(n6.hour, 13) + + def testHour(self): + base = datetime(2010, 1, 24, 12, 2) + itr = croniter('0 */3 * * *', base) + n1 = itr.get_next(datetime) + self.assertEqual(n1.hour, 15) + self.assertEqual(n1.minute, 0) + for i in range(2): + itr.get_next() + n2 = itr.get_next(datetime) + self.assertEqual(n2.hour, 0) + self.assertEqual(n2.day, 25) + + def testDay(self): + base = datetime(2010, 2, 24, 12, 9) + itr = croniter('0 0 */3 * *', base) + n1 = itr.get_next(datetime) + # 1 4 7 10 13 16 19 22 25 28 + self.assertEqual(n1.day, 25) + n2 = itr.get_next(datetime) + self.assertEqual(n2.day, 28) + n3 = itr.get_next(datetime) + self.assertEqual(n3.day, 1) + self.assertEqual(n3.month, 3) + + # test leap year + base = datetime(1996, 2, 27) + itr = croniter('0 0 * * *', base) + n1 = itr.get_next(datetime) + self.assertEqual(n1.day, 28) + self.assertEqual(n1.month, 2) + n2 = itr.get_next(datetime) + self.assertEqual(n2.day, 29) + self.assertEqual(n2.month, 2) + + base2 = datetime(2000, 2, 27) + itr2 = croniter('0 0 * * *', base2) + n3 = itr2.get_next(datetime) + self.assertEqual(n3.day, 28) + self.assertEqual(n3.month, 2) + n4 = itr2.get_next(datetime) + self.assertEqual(n4.day, 29) + self.assertEqual(n4.month, 2) + + def testWeekDay(self): + base = datetime(2010, 2, 25) + itr = croniter('0 0 * * sat', base) + n1 = itr.get_next(datetime) + self.assertEqual(n1.isoweekday(), 6) + self.assertEqual(n1.day, 27) + n2 = itr.get_next(datetime) + self.assertEqual(n2.isoweekday(), 6) + self.assertEqual(n2.day, 6) + self.assertEqual(n2.month, 3) + + base = datetime(2010, 1, 25) + itr = croniter('0 0 1 * wed', base) + n1 = itr.get_next(datetime) + self.assertEqual(n1.month, 1) + self.assertEqual(n1.day, 27) + self.assertEqual(n1.year, 2010) + n2 = itr.get_next(datetime) + self.assertEqual(n2.month, 2) + self.assertEqual(n2.day, 1) + self.assertEqual(n2.year, 2010) + n3 = itr.get_next(datetime) + self.assertEqual(n3.month, 2) + self.assertEqual(n3.day, 3) + self.assertEqual(n3.year, 2010) + + def testNthWeekDay(self): + base = datetime(2010, 2, 25) + itr = croniter('0 0 * * sat#1', base) + n1 = itr.get_next(datetime) + self.assertEqual(n1.isoweekday(), 6) + self.assertEqual(n1.day, 6) + self.assertEqual(n1.month, 3) + n2 = itr.get_next(datetime) + self.assertEqual(n2.isoweekday(), 6) + self.assertEqual(n2.day, 3) + self.assertEqual(n2.month, 4) + + base = datetime(2010, 1, 25) + itr = croniter('0 0 * * wed#5', base) + n1 = itr.get_next(datetime) + self.assertEqual(n1.month, 3) + self.assertEqual(n1.day, 31) + self.assertEqual(n1.year, 2010) + n2 = itr.get_next(datetime) + self.assertEqual(n2.month, 6) + self.assertEqual(n2.day, 30) + self.assertEqual(n2.year, 2010) + n3 = itr.get_next(datetime) + self.assertEqual(n3.month, 9) + self.assertEqual(n3.day, 29) + self.assertEqual(n3.year, 2010) + + def testWeekDayDayAnd(self): + base = datetime(2010, 1, 25) + itr = croniter('0 0 1 * mon', base, day_or=False) + n1 = itr.get_next(datetime) + self.assertEqual(n1.month, 2) + self.assertEqual(n1.day, 
1) + self.assertEqual(n1.year, 2010) + n2 = itr.get_next(datetime) + self.assertEqual(n2.month, 3) + self.assertEqual(n2.day, 1) + self.assertEqual(n2.year, 2010) + n3 = itr.get_next(datetime) + self.assertEqual(n3.month, 11) + self.assertEqual(n3.day, 1) + self.assertEqual(n3.year, 2010) + + def testMonth(self): + base = datetime(2010, 1, 25) + itr = croniter('0 0 1 * *', base) + n1 = itr.get_next(datetime) + self.assertEqual(n1.month, 2) + self.assertEqual(n1.day, 1) + n2 = itr.get_next(datetime) + self.assertEqual(n2.month, 3) + self.assertEqual(n2.day, 1) + for i in range(8): + itr.get_next() + n3 = itr.get_next(datetime) + self.assertEqual(n3.month, 12) + self.assertEqual(n3.year, 2010) + n4 = itr.get_next(datetime) + self.assertEqual(n4.month, 1) + self.assertEqual(n4.year, 2011) + + def testLastDayOfMonth(self): + base = datetime(2015, 9, 4) + itr = croniter('0 0 l * *', base) + n1 = itr.get_next(datetime) + self.assertEqual(n1.month, 9) + self.assertEqual(n1.day, 30) + n2 = itr.get_next(datetime) + self.assertEqual(n2.month, 10) + self.assertEqual(n2.day, 31) + n3 = itr.get_next(datetime) + self.assertEqual(n3.month, 11) + self.assertEqual(n3.day, 30) + n4 = itr.get_next(datetime) + self.assertEqual(n4.month, 12) + self.assertEqual(n4.day, 31) + + def testPrevLastDayOfMonth(self): + base = datetime(2009, 12, 31, hour=20) + itr = croniter('0 0 l * *', base) + n1 = itr.get_prev(datetime) + self.assertEqual(n1.month, 12) + self.assertEqual(n1.day, 31) + + base = datetime(2009, 12, 31) + itr = croniter('0 0 l * *', base) + n1 = itr.get_prev(datetime) + self.assertEqual(n1.month, 11) + self.assertEqual(n1.day, 30) + + base = datetime(2010, 1, 5) + itr = croniter('0 0 l * *', base) + n1 = itr.get_prev(datetime) + self.assertEqual(n1.month, 12) + self.assertEqual(n1.day, 31) + n1 = itr.get_prev(datetime) + self.assertEqual(n1.month, 11) + self.assertEqual(n1.day, 30) + n1 = itr.get_prev(datetime) + self.assertEqual(n1.month, 10) + self.assertEqual(n1.day, 31) + n1 = itr.get_prev(datetime) + self.assertEqual(n1.month, 9) + self.assertEqual(n1.day, 30) + + base = datetime(2010, 1, 31, minute=2) + itr = croniter('* * l * *', base) + n1 = itr.get_prev(datetime) + self.assertEqual(n1.month, 1) + self.assertEqual(n1.day, 31) + n1 = itr.get_prev(datetime) + self.assertEqual(n1.month, 1) + self.assertEqual(n1.day, 31) + n1 = itr.get_prev(datetime) + self.assertEqual(n1.month, 12) + self.assertEqual(n1.day, 31) + n1 = itr.get_prev(datetime) + self.assertEqual(n1.month, 12) + self.assertEqual(n1.day, 31) + + def testError(self): + itr = croniter('* * * * *') + self.assertRaises(TypeError, itr.get_next, str) + self.assertRaises(ValueError, croniter, '* * * *') + self.assertRaises(ValueError, croniter, '* * 5-1 * *') + self.assertRaises(ValueError, croniter, '-90 * * * *') + self.assertRaises(ValueError, croniter, 'a * * * *') + self.assertRaises(ValueError, croniter, '* * * janu-jun *') + + def testSundayToThursdayWithAlphaConversion(self): + base = datetime(2010, 8, 25, 15, 56) # wednesday + itr = croniter("30 22 * * sun-thu", base) + next = itr.get_next(datetime) + + self.assertEqual(base.year, next.year) + self.assertEqual(base.month, next.month) + self.assertEqual(base.day, next.day) + self.assertEqual(22, next.hour) + self.assertEqual(30, next.minute) + + def testPrevMinute(self): + base = datetime(2010, 8, 25, 15, 56) + itr = croniter('*/1 * * * *', base) + prev = itr.get_prev(datetime) + self.assertEqual(base.year, prev.year) + self.assertEqual(base.month, prev.month) + 
self.assertEqual(base.day, prev.day) + self.assertEqual(base.hour, prev.hour) + self.assertEqual(base.minute, prev.minute + 1) + + base = datetime(2010, 8, 25, 15, 0) + itr = croniter('*/1 * * * *', base) + prev = itr.get_prev(datetime) + self.assertEqual(base.year, prev.year) + self.assertEqual(base.month, prev.month) + self.assertEqual(base.day, prev.day) + self.assertEqual(base.hour, prev.hour + 1) + self.assertEqual(59, prev.minute) + + base = datetime(2010, 8, 25, 0, 0) + itr = croniter('*/1 * * * *', base) + prev = itr.get_prev(datetime) + self.assertEqual(base.year, prev.year) + self.assertEqual(base.month, prev.month) + self.assertEqual(base.day, prev.day + 1) + self.assertEqual(23, prev.hour) + self.assertEqual(59, prev.minute) + + def testPrevDayOfMonthWithCrossing(self): + """ + Test getting previous occurrence that crosses into previous month. + """ + base = datetime(2012, 3, 15, 0, 0) + itr = croniter('0 0 22 * *', base) + prev = itr.get_prev(datetime) + self.assertEqual(prev.year, 2012) + self.assertEqual(prev.month, 2) + self.assertEqual(prev.day, 22) + self.assertEqual(prev.hour, 0) + self.assertEqual(prev.minute, 0) + + def testPrevWeekDay(self): + base = datetime(2010, 8, 25, 15, 56) + itr = croniter('0 0 * * sat,sun', base) + prev1 = itr.get_prev(datetime) + self.assertEqual(prev1.year, base.year) + self.assertEqual(prev1.month, base.month) + self.assertEqual(prev1.day, 22) + self.assertEqual(prev1.hour, 0) + self.assertEqual(prev1.minute, 0) + + prev2 = itr.get_prev(datetime) + self.assertEqual(prev2.year, base.year) + self.assertEqual(prev2.month, base.month) + self.assertEqual(prev2.day, 21) + self.assertEqual(prev2.hour, 0) + self.assertEqual(prev2.minute, 0) + + prev3 = itr.get_prev(datetime) + self.assertEqual(prev3.year, base.year) + self.assertEqual(prev3.month, base.month) + self.assertEqual(prev3.day, 15) + self.assertEqual(prev3.hour, 0) + self.assertEqual(prev3.minute, 0) + + def testPrevNthWeekDay(self): + base = datetime(2010, 8, 25, 15, 56) + itr = croniter('0 0 * * sat#1,sun#2', base) + prev1 = itr.get_prev(datetime) + self.assertEqual(prev1.year, base.year) + self.assertEqual(prev1.month, base.month) + self.assertEqual(prev1.day, 8) + self.assertEqual(prev1.hour, 0) + self.assertEqual(prev1.minute, 0) + + prev2 = itr.get_prev(datetime) + self.assertEqual(prev2.year, base.year) + self.assertEqual(prev2.month, base.month) + self.assertEqual(prev2.day, 7) + self.assertEqual(prev2.hour, 0) + self.assertEqual(prev2.minute, 0) + + prev3 = itr.get_prev(datetime) + self.assertEqual(prev3.year, base.year) + self.assertEqual(prev3.month, 7) + self.assertEqual(prev3.day, 11) + self.assertEqual(prev3.hour, 0) + self.assertEqual(prev3.minute, 0) + + def testPrevWeekDay2(self): + base = datetime(2010, 8, 25, 15, 56) + itr = croniter('10 0 * * 0', base) + prev = itr.get_prev(datetime) + self.assertEqual(prev.day, 22) + self.assertEqual(prev.hour, 0) + self.assertEqual(prev.minute, 10) + + def testISOWeekday(self): + base = datetime(2010, 2, 25) + itr = croniter('0 0 * * 7', base) + n1 = itr.get_next(datetime) + self.assertEqual(n1.isoweekday(), 7) + self.assertEqual(n1.day, 28) + n2 = itr.get_next(datetime) + self.assertEqual(n2.isoweekday(), 7) + self.assertEqual(n2.day, 7) + self.assertEqual(n2.month, 3) + + def testBug1(self): + base = datetime(2012, 2, 24) + itr = croniter('5 0 */2 * *', base) + n1 = itr.get_prev(datetime) + self.assertEqual(n1.hour, 0) + self.assertEqual(n1.minute, 5) + self.assertEqual(n1.month, 2) + # month starts from 1, 3 .... 
then 21, 23 + # so correct is not 22 but 23 + self.assertEqual(n1.day, 23) + + def testBug2(self): + base = datetime(2012, 1, 1, 0, 0) + iter = croniter('0 * * 3 *', base) + n1 = iter.get_next(datetime) + self.assertEqual(n1.year, base.year) + self.assertEqual(n1.month, 3) + self.assertEqual(n1.day, base.day) + self.assertEqual(n1.hour, base.hour) + self.assertEqual(n1.minute, base.minute) + + n2 = iter.get_next(datetime) + self.assertEqual(n2.year, base.year) + self.assertEqual(n2.month, 3) + self.assertEqual(n2.day, base.day) + self.assertEqual(n2.hour, base.hour + 1) + self.assertEqual(n2.minute, base.minute) + + n3 = iter.get_next(datetime) + self.assertEqual(n3.year, base.year) + self.assertEqual(n3.month, 3) + self.assertEqual(n3.day, base.day) + self.assertEqual(n3.hour, base.hour + 2) + self.assertEqual(n3.minute, base.minute) + + def testBug3(self): + base = datetime(2013, 3, 1, 12, 17, 34, 257877) + c = croniter('00 03 16,30 * *', base) + + n1 = c.get_next(datetime) + self.assertEqual(n1.month, 3) + self.assertEqual(n1.day, 16) + + n2 = c.get_next(datetime) + self.assertEqual(n2.month, 3) + self.assertEqual(n2.day, 30) + + n3 = c.get_next(datetime) + self.assertEqual(n3.month, 4) + self.assertEqual(n3.day, 16) + + n4 = c.get_prev(datetime) + self.assertEqual(n4.month, 3) + self.assertEqual(n4.day, 30) + + n5 = c.get_prev(datetime) + self.assertEqual(n5.month, 3) + self.assertEqual(n5.day, 16) + + n6 = c.get_prev(datetime) + self.assertEqual(n6.month, 2) + self.assertEqual(n6.day, 16) + + def test_bug34(self): + base = datetime(2012, 2, 24, 0, 0, 0) + itr = croniter('* * 31 2 *', base) + try: + itr.get_next(datetime) + except (CroniterBadDateError,) as ex: + self.assertEqual("{0}".format(ex), + 'failed to find next date') + + def testBug57(self): + base = datetime(2012, 2, 24, 0, 0, 0) + itr = croniter('0 4/6 * * *', base) + n1 = itr.get_next(datetime) + self.assertEqual(n1.hour, 4) + self.assertEqual(n1.minute, 0) + self.assertEqual(n1.month, 2) + self.assertEqual(n1.day, 24) + + n1 = itr.get_prev(datetime) + self.assertEqual(n1.hour, 22) + self.assertEqual(n1.minute, 0) + self.assertEqual(n1.month, 2) + self.assertEqual(n1.day, 23) + + itr = croniter('0 0/6 * * *', base) + n1 = itr.get_next(datetime) + self.assertEqual(n1.hour, 6) + self.assertEqual(n1.minute, 0) + self.assertEqual(n1.month, 2) + self.assertEqual(n1.day, 24) + + n1 = itr.get_prev(datetime) + self.assertEqual(n1.hour, 0) + self.assertEqual(n1.minute, 0) + self.assertEqual(n1.month, 2) + self.assertEqual(n1.day, 24) + + def test_multiple_months(self): + base = datetime(2016, 3, 1, 0, 0, 0) + itr = croniter('0 0 1 3,6,9,12 *', base) + n1 = itr.get_next(datetime) + self.assertEqual(n1.hour, 0) + self.assertEqual(n1.month, 6) + self.assertEqual(n1.day, 1) + self.assertEqual(n1.year, 2016) + + base = datetime(2016, 2, 15, 0, 0, 0) + itr = croniter('0 0 1 3,6,9,12 *', base) + n1 = itr.get_next(datetime) + self.assertEqual(n1.hour, 0) + self.assertEqual(n1.month, 3) + self.assertEqual(n1.day, 1) + self.assertEqual(n1.year, 2016) + + base = datetime(2016, 12, 3, 10, 0, 0) + itr = croniter('0 0 1 3,6,9,12 *', base) + n1 = itr.get_next(datetime) + self.assertEqual(n1.hour, 0) + self.assertEqual(n1.month, 3) + self.assertEqual(n1.day, 1) + self.assertEqual(n1.year, 2017) + + # The result with this parameters was incorrect. 
+ # self.assertEqual(p1.month, 12 + # AssertionError: 9 != 12 + base = datetime(2016, 3, 1, 0, 0, 0) + itr = croniter('0 0 1 3,6,9,12 *', base) + p1 = itr.get_prev(datetime) + self.assertEqual(p1.hour, 0) + self.assertEqual(p1.month, 12) + self.assertEqual(p1.day, 1) + self.assertEqual(p1.year, 2015) + + # check my change resolves another hidden bug. + base = datetime(2016, 2, 1, 0, 0, 0) + itr = croniter('0 0 1,15,31 * *', base) + p1 = itr.get_prev(datetime) + self.assertEqual(p1.hour, 0) + self.assertEqual(p1.month, 1) + self.assertEqual(p1.day, 31) + self.assertEqual(p1.year, 2016) + + base = datetime(2016, 6, 1, 0, 0, 0) + itr = croniter('0 0 1 3,6,9,12 *', base) + p1 = itr.get_prev(datetime) + self.assertEqual(p1.hour, 0) + self.assertEqual(p1.month, 3) + self.assertEqual(p1.day, 1) + self.assertEqual(p1.year, 2016) + + base = datetime(2016, 3, 1, 0, 0, 0) + itr = croniter('0 0 1 1,3,6,9,12 *', base) + p1 = itr.get_prev(datetime) + self.assertEqual(p1.hour, 0) + self.assertEqual(p1.month, 1) + self.assertEqual(p1.day, 1) + self.assertEqual(p1.year, 2016) + + base = datetime(2016, 3, 1, 0, 0, 0) + itr = croniter('0 0 1 1,3,6,9,12 *', base) + p1 = itr.get_prev(datetime) + self.assertEqual(p1.hour, 0) + self.assertEqual(p1.month, 1) + self.assertEqual(p1.day, 1) + self.assertEqual(p1.year, 2016) + + def test_rangeGenerator(self): + base = datetime(2013, 3, 4, 0, 0) + itr = croniter('1-9/2 0 1 * *', base) + n1 = itr.get_next(datetime) + n2 = itr.get_next(datetime) + n3 = itr.get_next(datetime) + n4 = itr.get_next(datetime) + n5 = itr.get_next(datetime) + self.assertEqual(n1.minute, 1) + self.assertEqual(n2.minute, 3) + self.assertEqual(n3.minute, 5) + self.assertEqual(n4.minute, 7) + self.assertEqual(n5.minute, 9) + + def testPreviousHour(self): + base = datetime(2012, 6, 23, 17, 41) + itr = croniter('* 10 * * *', base) + prev1 = itr.get_prev(datetime) + self.assertEqual(prev1.year, base.year) + self.assertEqual(prev1.month, base.month) + self.assertEqual(prev1.day, base.day) + self.assertEqual(prev1.hour, 10) + self.assertEqual(prev1.minute, 59) + + def testPreviousDay(self): + base = datetime(2012, 6, 27, 0, 15) + itr = croniter('* * 26 * *', base) + prev1 = itr.get_prev(datetime) + self.assertEqual(prev1.year, base.year) + self.assertEqual(prev1.month, base.month) + self.assertEqual(prev1.day, 26) + self.assertEqual(prev1.hour, 23) + self.assertEqual(prev1.minute, 59) + + def testPreviousMonth(self): + base = datetime(2012, 6, 18, 0, 15) + itr = croniter('* * * 5 *', base) + prev1 = itr.get_prev(datetime) + self.assertEqual(prev1.year, base.year) + self.assertEqual(prev1.month, 5) + self.assertEqual(prev1.day, 31) + self.assertEqual(prev1.hour, 23) + self.assertEqual(prev1.minute, 59) + + def testPreviousDow(self): + base = datetime(2012, 5, 13, 18, 48) + itr = croniter('* * * * sat', base) + prev1 = itr.get_prev(datetime) + self.assertEqual(prev1.year, base.year) + self.assertEqual(prev1.month, base.month) + self.assertEqual(prev1.day, 12) + self.assertEqual(prev1.hour, 23) + self.assertEqual(prev1.minute, 59) + + def testGetCurrent(self): + base = datetime(2012, 9, 25, 11, 24) + itr = croniter('* * * * *', base) + res = itr.get_current(datetime) + self.assertEqual(base.year, res.year) + self.assertEqual(base.month, res.month) + self.assertEqual(base.day, res.day) + self.assertEqual(base.hour, res.hour) + self.assertEqual(base.minute, res.minute) + + def testTimezone(self): + base = datetime(2013, 3, 4, 12, 15) + itr = croniter('* * * * *', base) + n1 = itr.get_next(datetime) + 
self.assertEqual(n1.tzinfo, None) + + tokyo = pytz.timezone('Asia/Tokyo') + itr2 = croniter('* * * * *', tokyo.localize(base)) + n2 = itr2.get_next(datetime) + self.assertEqual(n2.tzinfo.zone, 'Asia/Tokyo') + + def testInitNoStartTime(self): + itr = croniter('* * * * *') + sleep(.01) + itr2 = croniter('* * * * *') + # Greater dosnt exists in py26 + self.assertTrue(itr2.cur > itr.cur) + + def assertScheduleTimezone(self, callback, expected_schedule): + for expected_date, expected_offset in expected_schedule: + d = callback() + self.assertEqual(expected_date, d.replace(tzinfo=None)) + self.assertEqual(expected_offset, + croniter._timedelta_to_seconds(d.utcoffset())) + + def testTimezoneWinterTime(self): + tz = pytz.timezone('Europe/Athens') + + expected_schedule = [ + (datetime(2013, 10, 27, 2, 30, 0), 10800), + (datetime(2013, 10, 27, 3, 0, 0), 10800), + (datetime(2013, 10, 27, 3, 30, 0), 10800), + (datetime(2013, 10, 27, 3, 0, 0), 7200), + (datetime(2013, 10, 27, 3, 30, 0), 7200), + (datetime(2013, 10, 27, 4, 0, 0), 7200), + (datetime(2013, 10, 27, 4, 30, 0), 7200), + ] + + start = datetime(2013, 10, 27, 2, 0, 0) + ct = croniter('*/30 * * * *', tz.localize(start)) + self.assertScheduleTimezone(lambda: ct.get_next(datetime), expected_schedule) + + start = datetime(2013, 10, 27, 5, 0, 0) + ct = croniter('*/30 * * * *', tz.localize(start)) + self.assertScheduleTimezone(lambda: ct.get_prev(datetime), reversed(expected_schedule)) + + def testTimezoneSummerTime(self): + tz = pytz.timezone('Europe/Athens') + + expected_schedule = [ + (datetime(2013, 3, 31, 1, 30, 0), 7200), + (datetime(2013, 3, 31, 2, 0, 0), 7200), + (datetime(2013, 3, 31, 2, 30, 0), 7200), + (datetime(2013, 3, 31, 4, 0, 0), 10800), + (datetime(2013, 3, 31, 4, 30, 0), 10800), + ] + + start = datetime(2013, 3, 31, 1, 0, 0) + ct = croniter('*/30 * * * *', tz.localize(start)) + self.assertScheduleTimezone(lambda: ct.get_next(datetime), expected_schedule) + + start = datetime(2013, 3, 31, 5, 0, 0) + ct = croniter('*/30 * * * *', tz.localize(start)) + self.assertScheduleTimezone(lambda: ct.get_prev(datetime), reversed(expected_schedule)) + + def test_std_dst(self): + """ + DST tests + + This fixes https://github.com/taichino/croniter/issues/82 + + """ + tz = pytz.timezone('Europe/Warsaw') + # -> 2017-03-26 01:59+1:00 -> 03:00+2:00 + local_date = tz.localize(datetime(2017, 3, 26)) + val = croniter('0 0 * * *', local_date).get_next(datetime) + self.assertEqual(val, tz.localize(datetime(2017, 3, 27))) + # + local_date = tz.localize(datetime(2017, 3, 26, 1)) + cr = croniter('0 * * * *', local_date) + val = cr.get_next(datetime) + self.assertEqual(val, tz.localize(datetime(2017, 3, 26, 3))) + val = cr.get_current(datetime) + self.assertEqual(val, tz.localize(datetime(2017, 3, 26, 3))) + + # -> 2017-10-29 02:59+2:00 -> 02:00+1:00 + local_date = tz.localize(datetime(2017, 10, 29)) + val = croniter('0 0 * * *', local_date).get_next(datetime) + self.assertEqual(val, tz.localize(datetime(2017, 10, 30))) + local_date = tz.localize(datetime(2017, 10, 29, 1, 59)) + val = croniter('0 * * * *', local_date).get_next(datetime) + self.assertEqual( + val.replace(tzinfo=None), + tz.localize(datetime(2017, 10, 29, 2)).replace(tzinfo=None)) + local_date = tz.localize(datetime(2017, 10, 29, 2)) + val = croniter('0 * * * *', local_date).get_next(datetime) + self.assertEqual(val, tz.localize(datetime(2017, 10, 29, 3))) + local_date = tz.localize(datetime(2017, 10, 29, 3)) + val = croniter('0 * * * *', local_date).get_next(datetime) + self.assertEqual(val, 
tz.localize(datetime(2017, 10, 29, 4))) + local_date = tz.localize(datetime(2017, 10, 29, 4)) + val = croniter('0 * * * *', local_date).get_next(datetime) + self.assertEqual(val, tz.localize(datetime(2017, 10, 29, 5))) + local_date = tz.localize(datetime(2017, 10, 29, 5)) + val = croniter('0 * * * *', local_date).get_next(datetime) + self.assertEqual(val, tz.localize(datetime(2017, 10, 29, 6))) + + def test_std_dst2(self): + """ + DST tests + + This fixes https://github.com/taichino/croniter/issues/87 + + São Paulo, Brazil: 18/02/2018 00:00 -> 17/02/2018 23:00 + + """ + tz = pytz.timezone("America/Sao_Paulo") + local_dates = [ + # 17-22: 00 -> 18-00:00 + (tz.localize(datetime(2018, 2, 17, 21, 0, 0)), + '2018-02-18 00:00:00-03:00'), + # 17-23: 00 -> 18-00:00 + (tz.localize(datetime(2018, 2, 17, 22, 0, 0)), + '2018-02-18 00:00:00-03:00'), + # 17-23: 00 -> 18-00:00 + (tz.localize(datetime(2018, 2, 17, 23, 0, 0)), + '2018-02-18 00:00:00-03:00'), + # 18-00: 00 -> 19-00:00 + (tz.localize(datetime(2018, 2, 18, 0, 0, 0)), + '2018-02-19 00:00:00-03:00'), + # 17-22: 00 -> 18-00:00 + (tz.localize(datetime(2018, 2, 17, 21, 5, 0)), + '2018-02-18 00:00:00-03:00'), + # 17-23: 00 -> 18-00:00 + (tz.localize(datetime(2018, 2, 17, 22, 5, 0)), + '2018-02-18 00:00:00-03:00'), + # 17-23: 00 -> 18-00:00 + (tz.localize(datetime(2018, 2, 17, 23, 5, 0)), + '2018-02-18 00:00:00-03:00'), + # 18-00: 00 -> 19-00:00 + (tz.localize(datetime(2018, 2, 18, 0, 5, 0)), + '2018-02-19 00:00:00-03:00'), + ] + ret1 = [croniter("0 0 * * *", d[0]).get_next(datetime) + for d in local_dates] + sret1 = ['{0}'.format(d) for d in ret1] + lret1 = ['{0}'.format(d[1]) for d in local_dates] + self.assertEqual(sret1, lret1) + + def test_error_alpha_cron(self): + self.assertRaises(CroniterNotAlphaError, croniter.expand, + '* * * janu-jun *') + + def test_error_bad_cron(self): + self.assertRaises(CroniterBadCronError, croniter.expand, + '* * * *') + self.assertRaises(CroniterBadCronError, croniter.expand, + '* * * * * * *') + + def test_is_valid(self): + self.assertTrue(croniter.is_valid('0 * * * *')) + self.assertFalse(croniter.is_valid('0 * *')) + self.assertFalse(croniter.is_valid('* * * janu-jun *')) + + def test_exactly_the_same_minute(self): + base = datetime(2018, 3, 5, 12, 30, 50) + itr = croniter('30 7,12,17 * * *', base) + n1 = itr.get_prev(datetime) + self.assertEqual(12, n1.hour) + + n2 = itr.get_prev(datetime) + self.assertEqual(7, n2.hour) + + n3 = itr.get_next(datetime) + self.assertEqual(12, n3.hour) + + def test_next_when_now_satisfies_cron(self): + ts_a = datetime(2018, 5, 21, 0, 3, 0) + ts_b = datetime(2018, 5, 21, 0, 4, 20) + test_cron = '4 * * * *' + + next_a = croniter(test_cron, start_time=ts_a).get_next() + next_b = croniter(test_cron, start_time=ts_b).get_next() + + self.assertTrue(next_b > next_a) + + def test_milliseconds(self): + """ + https://github.com/taichino/croniter/issues/107 + """ + # + dt = datetime(2018, 1, 2, 10, 0, 0, 500) + c = croniter("0 10 * * * ", start_time=dt) + ts = "{0}".format(datetime.utcfromtimestamp(c.get_prev())) + self.assertEqual(ts, '2018-01-02 10:00:00') + # + dt = datetime(2018, 1, 2, 10, 0, 1, 0) + c = croniter("0 10 * * * ", start_time=dt) + ts = "{0}".format(datetime.utcfromtimestamp(c.get_prev())) + self.assertEqual(ts, '2018-01-02 10:00:00') + # + dt = datetime(2018, 1, 2, 9, 59, 59, 999999) + c = croniter("0 10 * * * ", start_time=dt) + ts = "{0}".format(datetime.utcfromtimestamp(c.get_prev())) + self.assertEqual(ts, '2018-01-01 10:00:00') + + +if __name__ == '__main__': + 
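 # A minimal usage sketch of the API exercised by the tests above (the cron
 # expression and base date below are illustrative values, not taken from the
 # suite): croniter walks a schedule forwards with get_next(), backwards with
 # get_prev(), and expressions can be checked up front with is_valid().
 demo = croniter('*/15 9-17 * * mon-fri', datetime(2018, 10, 22, 10, 7))
 print(demo.get_next(datetime))    # 2018-10-22 10:15:00, the next quarter-hour slot
 print(demo.get_prev(datetime))    # steps back to the previous matching time
 print(croniter.is_valid('*/15 9-17 * * mon-fri'))    # True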
unittest.main() diff --git a/thesisenv/lib/python3.6/site-packages/croniter/tests/test_speed.py b/thesisenv/lib/python3.6/site-packages/croniter/tests/test_speed.py new file mode 100644 index 0000000..6aafe6f --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/croniter/tests/test_speed.py @@ -0,0 +1,225 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from __future__ import absolute_import, print_function +from datetime import datetime +from croniter import croniter + +from croniter.tests import base + + +class timerTest(object): + def __init__(self): + self.tests = tuple(getattr(self, m) + for m in dir(self) + if m.lower().startswith('test')) + + def run(self): + for test in self.tests: + test() + + +class CroniterTest(timerTest): + def testMinute(self): + # minute asterisk + base = datetime(2010, 1, 23, 12, 18) + itr = croniter('*/1 * * * *', base) + n1 = itr.get_next(datetime) # 19 + base.year == n1.year + base.month == n1.month + base.day == n1.day + base.hour == n1.hour + base.minute == n1.minute - 1 + for i in range(39): # ~ 58 + itr.get_next() + + n2 = itr.get_next(datetime) + n2.minute == 59 + + n3 = itr.get_next(datetime) + + n3.minute == 0 + n3.hour == 13 + + itr = croniter('*/5 * * * *', base) + n4 = itr.get_next(datetime) + n4.minute == 20 + for i in range(6): + itr.get_next() + n5 = itr.get_next(datetime) + n5.minute == 55 + + n6 = itr.get_next(datetime) + n6.minute == 0 + n6.hour == 13 + + def testHour(self): + base = datetime(2010, 1, 24, 12, 2) + itr = croniter('0 */3 * * *', base) + n1 = itr.get_next(datetime) + + n1.hour == 15 + n1.minute == 0 + + for i in range(2): + itr.get_next() + + n2 = itr.get_next(datetime) + n2.hour == 0 + n2.day == 25 + + def testDay(self): + base = datetime(2010, 2, 24, 12, 9) + itr = croniter('0 0 */3 * *', base) + n1 = itr.get_next(datetime) + n1.day == 27 + n2 = itr.get_next(datetime) + n2.day == 3 + + # test leap year + base = datetime(1996, 2, 27) + itr = croniter('0 0 * * *', base) + n1 = itr.get_next(datetime) + n1.day == 28 + n1.month == 2 + n2 = itr.get_next(datetime) + n2.day == 29 + n2.month == 2 + + base2 = datetime(2000, 2, 27) + itr2 = croniter('0 0 * * *', base2) + n3 = itr2.get_next(datetime) + n3.day == 28 + n3.month == 2 + n4 = itr2.get_next(datetime) + n4.day == 29 + n4.month == 2 + + def testWeekDay(self): + base = datetime(2010, 2, 25) + itr = croniter('0 0 * * sat', base) + n1 = itr.get_next(datetime) + n1.isoweekday() == 6 + n1.day == 27 + + n2 = itr.get_next(datetime) + n2.isoweekday() == 6 + n2.day == 6 + n2.month == 3 + + base = datetime(2010, 1, 25) + itr = croniter('0 0 1 * wed', base) + n1 = itr.get_next(datetime) + n1.month == 1 + n1.day == 27 + n1.year == 2010 + + n2 = itr.get_next(datetime) + n2.month == 2 + n2.day == 1 + n2.year == 2010 + + n3 = itr.get_next(datetime) + n3.month == 2 + n3.day == 3 + n3.year == 2010 + + def testMonth(self): + base = datetime(2010, 1, 25) + itr = croniter('0 0 1 * *', base) + n1 = itr.get_next(datetime) + + n1.month == 2 + n1.day == 1 + + n2 = itr.get_next(datetime) + n2.month == 3 + n2.day == 1 + + for i in range(8): + itr.get_next() + + n3 = itr.get_next(datetime) + n3.month == 12 + n3.year == 2010 + + n4 = itr.get_next(datetime) + n4.month == 1 + n4.year == 2011 + + def testPrevMinute(self): + base = datetime(2010, 8, 25, 15, 56) + itr = croniter('*/1 * * * *', base) + prev = itr.get_prev(datetime) + base.year == prev.year + base.month == prev.month + base.day == prev.day + base.hour == prev.hour + base.minute, prev.minute + 1 + + base = datetime(2010, 8, 25, 15, 0) + 
itr = croniter('*/1 * * * *', base) + prev = itr.get_prev(datetime) + base.year == prev.year + base.month == prev.month + base.day == prev.day + base.hour == prev.hour + 1 + 59 == prev.minute + + base = datetime(2010, 8, 25, 0, 0) + itr = croniter('*/1 * * * *', base) + prev = itr.get_prev(datetime) + base.year == prev.year + base.month == prev.month + base.day == prev.day + 1 + 23 == prev.hour + 59 == prev.minute + + def testPrevWeekDay(self): + base = datetime(2010, 8, 25, 15, 56) + itr = croniter('0 0 * * sat,sun', base) + prev1 = itr.get_prev(datetime) + prev1.year == base.year + prev1.month == base.month + prev1.day == 22 + prev1.hour == 0 + prev1.minute == 0 + + prev2 = itr.get_prev(datetime) + prev2.year == base.year + prev2.month == base.month + prev2.day == 21 + prev2.hour == 0 + prev2.minute == 0 + + prev3 = itr.get_prev(datetime) + prev3.year == base.year + prev3.month == base.month + prev3.day == 15 + prev3.hour == 0 + prev3.minute == 0 + + def testISOWeekday(self): + base = datetime(2010, 2, 25) + itr = croniter('0 0 * * 7', base) + n1 = itr.get_next(datetime) + n1.isoweekday() == 7 + n1.day == 28 + + n2 = itr.get_next(datetime) + n2.isoweekday() == 7 + n2.day == 7 + n2.month == 3 + + +class TestCase(base.TestCase): + """make zope.testrunner happy""" + + def test_Noop(self): + self.assertEqual(1, 1) + + +if __name__ == '__main__': + from timeit import Timer + t = Timer('c=CroniterTest();c.run()', 'from __main__ import CroniterTest') + print(t.timeit(200)) diff --git a/thesisenv/lib/python3.6/site-packages/dateutil/__init__.py b/thesisenv/lib/python3.6/site-packages/dateutil/__init__.py new file mode 100644 index 0000000..0defb82 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/dateutil/__init__.py @@ -0,0 +1,8 @@ +# -*- coding: utf-8 -*- +try: + from ._version import version as __version__ +except ImportError: + __version__ = 'unknown' + +__all__ = ['easter', 'parser', 'relativedelta', 'rrule', 'tz', + 'utils', 'zoneinfo'] diff --git a/thesisenv/lib/python3.6/site-packages/dateutil/_common.py b/thesisenv/lib/python3.6/site-packages/dateutil/_common.py new file mode 100644 index 0000000..4eb2659 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/dateutil/_common.py @@ -0,0 +1,43 @@ +""" +Common code used in multiple modules. 
+""" + + +class weekday(object): + __slots__ = ["weekday", "n"] + + def __init__(self, weekday, n=None): + self.weekday = weekday + self.n = n + + def __call__(self, n): + if n == self.n: + return self + else: + return self.__class__(self.weekday, n) + + def __eq__(self, other): + try: + if self.weekday != other.weekday or self.n != other.n: + return False + except AttributeError: + return False + return True + + def __hash__(self): + return hash(( + self.weekday, + self.n, + )) + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] + if not self.n: + return s + else: + return "%s(%+d)" % (s, self.n) + +# vim:ts=4:sw=4:et diff --git a/thesisenv/lib/python3.6/site-packages/dateutil/_version.py b/thesisenv/lib/python3.6/site-packages/dateutil/_version.py new file mode 100644 index 0000000..713fe0d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/dateutil/_version.py @@ -0,0 +1,4 @@ +# coding: utf-8 +# file generated by setuptools_scm +# don't change, don't track in version control +version = '2.7.3' diff --git a/thesisenv/lib/python3.6/site-packages/dateutil/easter.py b/thesisenv/lib/python3.6/site-packages/dateutil/easter.py new file mode 100644 index 0000000..53b7c78 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/dateutil/easter.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +""" +This module offers a generic easter computing method for any given year, using +Western, Orthodox or Julian algorithms. +""" + +import datetime + +__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"] + +EASTER_JULIAN = 1 +EASTER_ORTHODOX = 2 +EASTER_WESTERN = 3 + + +def easter(year, method=EASTER_WESTERN): + """ + This method was ported from the work done by GM Arts, + on top of the algorithm by Claus Tondering, which was + based in part on the algorithm of Ouding (1940), as + quoted in "Explanatory Supplement to the Astronomical + Almanac", P. Kenneth Seidelmann, editor. + + This algorithm implements three different easter + calculation methods: + + 1 - Original calculation in Julian calendar, valid in + dates after 326 AD + 2 - Original method, with date converted to Gregorian + calendar, valid in years 1583 to 4099 + 3 - Revised method, in Gregorian calendar, valid in + years 1583 to 4099 as well + + These methods are represented by the constants: + + * ``EASTER_JULIAN = 1`` + * ``EASTER_ORTHODOX = 2`` + * ``EASTER_WESTERN = 3`` + + The default method is method 3. 
+ + More about the algorithm may be found at: + + `GM Arts: Easter Algorithms `_ + + and + + `The Calendar FAQ: Easter `_ + + """ + + if not (1 <= method <= 3): + raise ValueError("invalid method") + + # g - Golden year - 1 + # c - Century + # h - (23 - Epact) mod 30 + # i - Number of days from March 21 to Paschal Full Moon + # j - Weekday for PFM (0=Sunday, etc) + # p - Number of days from March 21 to Sunday on or before PFM + # (-6 to 28 methods 1 & 3, to 56 for method 2) + # e - Extra days to add for method 2 (converting Julian + # date to Gregorian date) + + y = year + g = y % 19 + e = 0 + if method < 3: + # Old method + i = (19*g + 15) % 30 + j = (y + y//4 + i) % 7 + if method == 2: + # Extra dates to convert Julian to Gregorian date + e = 10 + if y > 1600: + e = e + y//100 - 16 - (y//100 - 16)//4 + else: + # New method + c = y//100 + h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30 + i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11)) + j = (y + y//4 + i + 2 - c + c//4) % 7 + + # p can be from -6 to 56 corresponding to dates 22 March to 23 May + # (later dates apply to method 2, although 23 May never actually occurs) + p = i - j + e + d = 1 + (p + 27 + (p + 6)//40) % 31 + m = 3 + (p + 26)//30 + return datetime.date(int(y), int(m), int(d)) diff --git a/thesisenv/lib/python3.6/site-packages/dateutil/parser/__init__.py b/thesisenv/lib/python3.6/site-packages/dateutil/parser/__init__.py new file mode 100644 index 0000000..216762c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/dateutil/parser/__init__.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +from ._parser import parse, parser, parserinfo +from ._parser import DEFAULTPARSER, DEFAULTTZPARSER +from ._parser import UnknownTimezoneWarning + +from ._parser import __doc__ + +from .isoparser import isoparser, isoparse + +__all__ = ['parse', 'parser', 'parserinfo', + 'isoparse', 'isoparser', + 'UnknownTimezoneWarning'] + + +### +# Deprecate portions of the private interface so that downstream code that +# is improperly relying on it is given *some* notice. 
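# A rough sketch of what this buys downstream code (hypothetical usage; the
# wrappers are applied at the bottom of this module): touching one of the
# deprecated private names keeps working but emits a DeprecationWarning.
#
#     >>> import warnings
#     >>> from dateutil.parser import _timelex
#     >>> with warnings.catch_warnings(record=True) as w:
#     ...     warnings.simplefilter("always")
#     ...     _ = _timelex("2003-09-25")
#     >>> issubclass(w[-1].category, DeprecationWarning)
#     True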
+ + +def __deprecated_private_func(f): + from functools import wraps + import warnings + + msg = ('{name} is a private function and may break without warning, ' + 'it will be moved and or renamed in future versions.') + msg = msg.format(name=f.__name__) + + @wraps(f) + def deprecated_func(*args, **kwargs): + warnings.warn(msg, DeprecationWarning) + return f(*args, **kwargs) + + return deprecated_func + +def __deprecate_private_class(c): + import warnings + + msg = ('{name} is a private class and may break without warning, ' + 'it will be moved and or renamed in future versions.') + msg = msg.format(name=c.__name__) + + class private_class(c): + __doc__ = c.__doc__ + + def __init__(self, *args, **kwargs): + warnings.warn(msg, DeprecationWarning) + super(private_class, self).__init__(*args, **kwargs) + + private_class.__name__ = c.__name__ + + return private_class + + +from ._parser import _timelex, _resultbase +from ._parser import _tzparser, _parsetz + +_timelex = __deprecate_private_class(_timelex) +_tzparser = __deprecate_private_class(_tzparser) +_resultbase = __deprecate_private_class(_resultbase) +_parsetz = __deprecated_private_func(_parsetz) diff --git a/thesisenv/lib/python3.6/site-packages/dateutil/parser/_parser.py b/thesisenv/lib/python3.6/site-packages/dateutil/parser/_parser.py new file mode 100644 index 0000000..9d2bb79 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/dateutil/parser/_parser.py @@ -0,0 +1,1578 @@ +# -*- coding: utf-8 -*- +""" +This module offers a generic date/time string parser which is able to parse +most known formats to represent a date and/or time. + +This module attempts to be forgiving with regards to unlikely input formats, +returning a datetime object even for dates which are ambiguous. If an element +of a date/time stamp is omitted, the following rules are applied: + +- If AM or PM is left unspecified, a 24-hour clock is assumed, however, an hour + on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is + specified. +- If a time zone is omitted, a timezone-naive datetime is returned. + +If any other elements are missing, they are taken from the +:class:`datetime.datetime` object passed to the parameter ``default``. If this +results in a day number exceeding the valid number of days per month, the +value falls back to the end of the month. + +Additional resources about date/time string formats can be found below: + +- `A summary of the international standard date and time notation + `_ +- `W3C Date and Time Formats `_ +- `Time Formats (Planetary Rings Node) `_ +- `CPAN ParseDate module + `_ +- `Java SimpleDateFormat Class + `_ +""" +from __future__ import unicode_literals + +import datetime +import re +import string +import time +import warnings + +from calendar import monthrange +from io import StringIO + +import six +from six import binary_type, integer_types, text_type + +from decimal import Decimal + +from warnings import warn + +from .. import relativedelta +from .. import tz + +__all__ = ["parse", "parserinfo"] + + +# TODO: pandas.core.tools.datetimes imports this explicitly. Might be worth +# making public and/or figuring out if there is something we can +# take off their plate. 
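# A rough sketch of what the lexer below produces (illustrative input; see
# get_token() for the exact rules): the input is cut wherever the character
# class changes, so words, digit runs, separators and single spaces each come
# out as their own token.
#
#     >>> _timelex.split("Sep 25 2003 10:36")
#     ['Sep', ' ', '25', ' ', '2003', ' ', '10', ':', '36']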
+class _timelex(object): + # Fractional seconds are sometimes split by a comma + _split_decimal = re.compile("([.,])") + + def __init__(self, instream): + if six.PY2: + # In Python 2, we can't duck type properly because unicode has + # a 'decode' function, and we'd be double-decoding + if isinstance(instream, (binary_type, bytearray)): + instream = instream.decode() + else: + if getattr(instream, 'decode', None) is not None: + instream = instream.decode() + + if isinstance(instream, text_type): + instream = StringIO(instream) + elif getattr(instream, 'read', None) is None: + raise TypeError('Parser must be a string or character stream, not ' + '{itype}'.format(itype=instream.__class__.__name__)) + + self.instream = instream + self.charstack = [] + self.tokenstack = [] + self.eof = False + + def get_token(self): + """ + This function breaks the time string into lexical units (tokens), which + can be parsed by the parser. Lexical units are demarcated by changes in + the character set, so any continuous string of letters is considered + one unit, any continuous string of numbers is considered one unit. + + The main complication arises from the fact that dots ('.') can be used + both as separators (e.g. "Sep.20.2009") or decimal points (e.g. + "4:30:21.447"). As such, it is necessary to read the full context of + any dot-separated strings before breaking it into tokens; as such, this + function maintains a "token stack", for when the ambiguous context + demands that multiple tokens be parsed at once. + """ + if self.tokenstack: + return self.tokenstack.pop(0) + + seenletters = False + token = None + state = None + + while not self.eof: + # We only realize that we've reached the end of a token when we + # find a character that's not part of the current token - since + # that character may be part of the next token, it's stored in the + # charstack. + if self.charstack: + nextchar = self.charstack.pop(0) + else: + nextchar = self.instream.read(1) + while nextchar == '\x00': + nextchar = self.instream.read(1) + + if not nextchar: + self.eof = True + break + elif not state: + # First character of the token - determines if we're starting + # to parse a word, a number or something else. + token = nextchar + if self.isword(nextchar): + state = 'a' + elif self.isnum(nextchar): + state = '0' + elif self.isspace(nextchar): + token = ' ' + break # emit token + else: + break # emit token + elif state == 'a': + # If we've already started reading a word, we keep reading + # letters until we find something that's not part of a word. + seenletters = True + if self.isword(nextchar): + token += nextchar + elif nextchar == '.': + token += nextchar + state = 'a.' + else: + self.charstack.append(nextchar) + break # emit token + elif state == '0': + # If we've already started reading a number, we keep reading + # numbers until we find something that doesn't fit. + if self.isnum(nextchar): + token += nextchar + elif nextchar == '.' or (nextchar == ',' and len(token) >= 2): + token += nextchar + state = '0.' + else: + self.charstack.append(nextchar) + break # emit token + elif state == 'a.': + # If we've seen some letters and a dot separator, continue + # parsing, and the tokens will be broken up later. + seenletters = True + if nextchar == '.' or self.isword(nextchar): + token += nextchar + elif self.isnum(nextchar) and token[-1] == '.': + token += nextchar + state = '0.' 
+ else: + self.charstack.append(nextchar) + break # emit token + elif state == '0.': + # If we've seen at least one dot separator, keep going, we'll + # break up the tokens later. + if nextchar == '.' or self.isnum(nextchar): + token += nextchar + elif self.isword(nextchar) and token[-1] == '.': + token += nextchar + state = 'a.' + else: + self.charstack.append(nextchar) + break # emit token + + if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or + token[-1] in '.,')): + l = self._split_decimal.split(token) + token = l[0] + for tok in l[1:]: + if tok: + self.tokenstack.append(tok) + + if state == '0.' and token.count('.') == 0: + token = token.replace(',', '.') + + return token + + def __iter__(self): + return self + + def __next__(self): + token = self.get_token() + if token is None: + raise StopIteration + + return token + + def next(self): + return self.__next__() # Python 2.x support + + @classmethod + def split(cls, s): + return list(cls(s)) + + @classmethod + def isword(cls, nextchar): + """ Whether or not the next character is part of a word """ + return nextchar.isalpha() + + @classmethod + def isnum(cls, nextchar): + """ Whether the next character is part of a number """ + return nextchar.isdigit() + + @classmethod + def isspace(cls, nextchar): + """ Whether the next character is whitespace """ + return nextchar.isspace() + + +class _resultbase(object): + + def __init__(self): + for attr in self.__slots__: + setattr(self, attr, None) + + def _repr(self, classname): + l = [] + for attr in self.__slots__: + value = getattr(self, attr) + if value is not None: + l.append("%s=%s" % (attr, repr(value))) + return "%s(%s)" % (classname, ", ".join(l)) + + def __len__(self): + return (sum(getattr(self, attr) is not None + for attr in self.__slots__)) + + def __repr__(self): + return self._repr(self.__class__.__name__) + + +class parserinfo(object): + """ + Class which handles what inputs are accepted. Subclass this to customize + the language and acceptable values for each parameter. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). If + ``yearfirst`` is set to ``True``, this distinguishes between YDM + and YMD. Default is ``False``. + + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. If ``True``, the first number is taken + to be the year, otherwise the last number is taken to be the year. + Default is ``False``. 
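 Both flags can also be passed straight to :func:`parse`. For example, with
 the ambiguous date below (illustrative value only):
 
 >>> from dateutil.parser import parse
 >>> parse("01/05/09")
 datetime.datetime(2009, 1, 5, 0, 0)
 >>> parse("01/05/09", dayfirst=True)
 datetime.datetime(2009, 5, 1, 0, 0)
 >>> parse("01/05/09", yearfirst=True)
 datetime.datetime(2001, 5, 9, 0, 0)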
+ """ + + # m from a.m/p.m, t from ISO T separator + JUMP = [" ", ".", ",", ";", "-", "/", "'", + "at", "on", "and", "ad", "m", "t", "of", + "st", "nd", "rd", "th"] + + WEEKDAYS = [("Mon", "Monday"), + ("Tue", "Tuesday"), # TODO: "Tues" + ("Wed", "Wednesday"), + ("Thu", "Thursday"), # TODO: "Thurs" + ("Fri", "Friday"), + ("Sat", "Saturday"), + ("Sun", "Sunday")] + MONTHS = [("Jan", "January"), + ("Feb", "February"), # TODO: "Febr" + ("Mar", "March"), + ("Apr", "April"), + ("May", "May"), + ("Jun", "June"), + ("Jul", "July"), + ("Aug", "August"), + ("Sep", "Sept", "September"), + ("Oct", "October"), + ("Nov", "November"), + ("Dec", "December")] + HMS = [("h", "hour", "hours"), + ("m", "minute", "minutes"), + ("s", "second", "seconds")] + AMPM = [("am", "a"), + ("pm", "p")] + UTCZONE = ["UTC", "GMT", "Z"] + PERTAIN = ["of"] + TZOFFSET = {} + # TODO: ERA = ["AD", "BC", "CE", "BCE", "Stardate", + # "Anno Domini", "Year of Our Lord"] + + def __init__(self, dayfirst=False, yearfirst=False): + self._jump = self._convert(self.JUMP) + self._weekdays = self._convert(self.WEEKDAYS) + self._months = self._convert(self.MONTHS) + self._hms = self._convert(self.HMS) + self._ampm = self._convert(self.AMPM) + self._utczone = self._convert(self.UTCZONE) + self._pertain = self._convert(self.PERTAIN) + + self.dayfirst = dayfirst + self.yearfirst = yearfirst + + self._year = time.localtime().tm_year + self._century = self._year // 100 * 100 + + def _convert(self, lst): + dct = {} + for i, v in enumerate(lst): + if isinstance(v, tuple): + for v in v: + dct[v.lower()] = i + else: + dct[v.lower()] = i + return dct + + def jump(self, name): + return name.lower() in self._jump + + def weekday(self, name): + try: + return self._weekdays[name.lower()] + except KeyError: + pass + return None + + def month(self, name): + try: + return self._months[name.lower()] + 1 + except KeyError: + pass + return None + + def hms(self, name): + try: + return self._hms[name.lower()] + except KeyError: + return None + + def ampm(self, name): + try: + return self._ampm[name.lower()] + except KeyError: + return None + + def pertain(self, name): + return name.lower() in self._pertain + + def utczone(self, name): + return name.lower() in self._utczone + + def tzoffset(self, name): + if name in self._utczone: + return 0 + + return self.TZOFFSET.get(name) + + def convertyear(self, year, century_specified=False): + """ + Converts two-digit years to year within [-50, 49] + range of self._year (current local time) + """ + + # Function contract is that the year is always positive + assert year >= 0 + + if year < 100 and not century_specified: + # assume current century to start + year += self._century + + if year >= self._year + 50: # if too far in future + year -= 100 + elif year < self._year - 50: # if too far in past + year += 100 + + return year + + def validate(self, res): + # move to info + if res.year is not None: + res.year = self.convertyear(res.year, res.century_specified) + + if res.tzoffset == 0 and not res.tzname or res.tzname == 'Z': + res.tzname = "UTC" + res.tzoffset = 0 + elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname): + res.tzoffset = 0 + return True + + +class _ymd(list): + def __init__(self, *args, **kwargs): + super(self.__class__, self).__init__(*args, **kwargs) + self.century_specified = False + self.dstridx = None + self.mstridx = None + self.ystridx = None + + @property + def has_year(self): + return self.ystridx is not None + + @property + def has_month(self): + return self.mstridx is not None + + 
@property + def has_day(self): + return self.dstridx is not None + + def could_be_day(self, value): + if self.has_day: + return False + elif not self.has_month: + return 1 <= value <= 31 + elif not self.has_year: + # Be permissive, assume leapyear + month = self[self.mstridx] + return 1 <= value <= monthrange(2000, month)[1] + else: + month = self[self.mstridx] + year = self[self.ystridx] + return 1 <= value <= monthrange(year, month)[1] + + def append(self, val, label=None): + if hasattr(val, '__len__'): + if val.isdigit() and len(val) > 2: + self.century_specified = True + if label not in [None, 'Y']: # pragma: no cover + raise ValueError(label) + label = 'Y' + elif val > 100: + self.century_specified = True + if label not in [None, 'Y']: # pragma: no cover + raise ValueError(label) + label = 'Y' + + super(self.__class__, self).append(int(val)) + + if label == 'M': + if self.has_month: + raise ValueError('Month is already set') + self.mstridx = len(self) - 1 + elif label == 'D': + if self.has_day: + raise ValueError('Day is already set') + self.dstridx = len(self) - 1 + elif label == 'Y': + if self.has_year: + raise ValueError('Year is already set') + self.ystridx = len(self) - 1 + + def _resolve_from_stridxs(self, strids): + """ + Try to resolve the identities of year/month/day elements using + ystridx, mstridx, and dstridx, if enough of these are specified. + """ + if len(self) == 3 and len(strids) == 2: + # we can back out the remaining stridx value + missing = [x for x in range(3) if x not in strids.values()] + key = [x for x in ['y', 'm', 'd'] if x not in strids] + assert len(missing) == len(key) == 1 + key = key[0] + val = missing[0] + strids[key] = val + + assert len(self) == len(strids) # otherwise this should not be called + out = {key: self[strids[key]] for key in strids} + return (out.get('y'), out.get('m'), out.get('d')) + + def resolve_ymd(self, yearfirst, dayfirst): + len_ymd = len(self) + year, month, day = (None, None, None) + + strids = (('y', self.ystridx), + ('m', self.mstridx), + ('d', self.dstridx)) + + strids = {key: val for key, val in strids if val is not None} + if (len(self) == len(strids) > 0 or + (len(self) == 3 and len(strids) == 2)): + return self._resolve_from_stridxs(strids) + + mstridx = self.mstridx + + if len_ymd > 3: + raise ValueError("More than three YMD values") + elif len_ymd == 1 or (mstridx is not None and len_ymd == 2): + # One member, or two members with a month string + if mstridx is not None: + month = self[mstridx] + # since mstridx is 0 or 1, self[mstridx-1] always + # looks up the other element + other = self[mstridx - 1] + else: + other = self[0] + + if len_ymd > 1 or mstridx is None: + if other > 31: + year = other + else: + day = other + + elif len_ymd == 2: + # Two members with numbers + if self[0] > 31: + # 99-01 + year, month = self + elif self[1] > 31: + # 01-99 + month, year = self + elif dayfirst and self[1] <= 12: + # 13-01 + day, month = self + else: + # 01-13 + month, day = self + + elif len_ymd == 3: + # Three members + if mstridx == 0: + if self[1] > 31: + # Apr-2003-25 + month, year, day = self + else: + month, day, year = self + elif mstridx == 1: + if self[0] > 31 or (yearfirst and self[2] <= 31): + # 99-Jan-01 + year, month, day = self + else: + # 01-Jan-01 + # Give precendence to day-first, since + # two-digit years is usually hand-written. + day, month, year = self + + elif mstridx == 2: + # WTF!? 
+ if self[1] > 31: + # 01-99-Jan + day, year, month = self + else: + # 99-01-Jan + year, day, month = self + + else: + if (self[0] > 31 or + self.ystridx == 0 or + (yearfirst and self[1] <= 12 and self[2] <= 31)): + # 99-01-01 + if dayfirst and self[2] <= 12: + year, day, month = self + else: + year, month, day = self + elif self[0] > 12 or (dayfirst and self[1] <= 12): + # 13-01-01 + day, month, year = self + else: + # 01-13-01 + month, day, year = self + + return year, month, day + + +class parser(object): + def __init__(self, info=None): + self.info = info or parserinfo() + + def parse(self, timestr, default=None, + ignoretz=False, tzinfos=None, **kwargs): + """ + Parse the date/time string into a :class:`datetime.datetime` object. + + :param timestr: + Any date/time string using the supported formats. + + :param default: + The default datetime object, if this is a datetime object and not + ``None``, elements specified in ``timestr`` replace elements in the + default object. + + :param ignoretz: + If set ``True``, time zones in parsed strings are ignored and a + naive :class:`datetime.datetime` object is returned. + + :param tzinfos: + Additional time zone names / aliases which may be present in the + string. This argument maps time zone names (and optionally offsets + from those time zones) to time zones. This parameter can be a + dictionary with timezone aliases mapping time zone names to time + zones or a function taking two parameters (``tzname`` and + ``tzoffset``) and returning a time zone. + + The timezones to which the names are mapped can be an integer + offset from UTC in seconds or a :class:`tzinfo` object. + + .. doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> from dateutil.parser import parse + >>> from dateutil.tz import gettz + >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")} + >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200)) + >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, + tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago')) + + This parameter is ignored if ``ignoretz`` is set. + + :param \\*\\*kwargs: + Keyword arguments as passed to ``_parse()``. + + :return: + Returns a :class:`datetime.datetime` object or, if the + ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the + first element being a :class:`datetime.datetime` object, the second + a tuple containing the fuzzy tokens. + + :raises ValueError: + Raised for invalid or unknown string format, if the provided + :class:`tzinfo` is not in a valid format, or if an invalid date + would be created. + + :raises TypeError: + Raised for non-string or character stream input. + + :raises OverflowError: + Raised if the parsed date exceeds the largest valid C integer on + your system. 
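 As a small illustration of ``default`` (example values only), fields
 missing from the string are taken from the default object:
 
 >>> from datetime import datetime
 >>> from dateutil.parser import parser
 >>> parser().parse("10:36", default=datetime(2003, 9, 25))
 datetime.datetime(2003, 9, 25, 10, 36)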
+ """ + + if default is None: + default = datetime.datetime.now().replace(hour=0, minute=0, + second=0, microsecond=0) + + res, skipped_tokens = self._parse(timestr, **kwargs) + + if res is None: + raise ValueError("Unknown string format:", timestr) + + if len(res) == 0: + raise ValueError("String does not contain a date:", timestr) + + ret = self._build_naive(res, default) + + if not ignoretz: + ret = self._build_tzaware(ret, res, tzinfos) + + if kwargs.get('fuzzy_with_tokens', False): + return ret, skipped_tokens + else: + return ret + + class _result(_resultbase): + __slots__ = ["year", "month", "day", "weekday", + "hour", "minute", "second", "microsecond", + "tzname", "tzoffset", "ampm","any_unused_tokens"] + + def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False, + fuzzy_with_tokens=False): + """ + Private method which performs the heavy lifting of parsing, called from + ``parse()``, which passes on its ``kwargs`` to this function. + + :param timestr: + The string to parse. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). If + ``yearfirst`` is set to ``True``, this distinguishes between YDM + and YMD. If set to ``None``, this value is retrieved from the + current :class:`parserinfo` object (which itself defaults to + ``False``). + + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. If ``True``, the first number is taken + to be the year, otherwise the last number is taken to be the year. + If this is set to ``None``, the value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). + + :param fuzzy: + Whether to allow fuzzy parsing, allowing for string like "Today is + January 1, 2047 at 8:21:00AM". + + :param fuzzy_with_tokens: + If ``True``, ``fuzzy`` is automatically set to True, and the parser + will return a tuple where the first element is the parsed + :class:`datetime.datetime` datetimestamp and the second element is + a tuple containing the portions of the string which were ignored: + + .. 
doctest:: + + >>> from dateutil.parser import parse + >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True) + (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) + + """ + if fuzzy_with_tokens: + fuzzy = True + + info = self.info + + if dayfirst is None: + dayfirst = info.dayfirst + + if yearfirst is None: + yearfirst = info.yearfirst + + res = self._result() + l = _timelex.split(timestr) # Splits the timestr into tokens + + skipped_idxs = [] + + # year/month/day list + ymd = _ymd() + + len_l = len(l) + i = 0 + try: + while i < len_l: + + # Check if it's a number + value_repr = l[i] + try: + value = float(value_repr) + except ValueError: + value = None + + if value is not None: + # Numeric token + i = self._parse_numeric_token(l, i, info, ymd, res, fuzzy) + + # Check weekday + elif info.weekday(l[i]) is not None: + value = info.weekday(l[i]) + res.weekday = value + + # Check month name + elif info.month(l[i]) is not None: + value = info.month(l[i]) + ymd.append(value, 'M') + + if i + 1 < len_l: + if l[i + 1] in ('-', '/'): + # Jan-01[-99] + sep = l[i + 1] + ymd.append(l[i + 2]) + + if i + 3 < len_l and l[i + 3] == sep: + # Jan-01-99 + ymd.append(l[i + 4]) + i += 2 + + i += 2 + + elif (i + 4 < len_l and l[i + 1] == l[i + 3] == ' ' and + info.pertain(l[i + 2])): + # Jan of 01 + # In this case, 01 is clearly year + if l[i + 4].isdigit(): + # Convert it here to become unambiguous + value = int(l[i + 4]) + year = str(info.convertyear(value)) + ymd.append(year, 'Y') + else: + # Wrong guess + pass + # TODO: not hit in tests + i += 4 + + # Check am/pm + elif info.ampm(l[i]) is not None: + value = info.ampm(l[i]) + val_is_ampm = self._ampm_valid(res.hour, res.ampm, fuzzy) + + if val_is_ampm: + res.hour = self._adjust_ampm(res.hour, value) + res.ampm = value + + elif fuzzy: + skipped_idxs.append(i) + + # Check for a timezone name + elif self._could_be_tzname(res.hour, res.tzname, res.tzoffset, l[i]): + res.tzname = l[i] + res.tzoffset = info.tzoffset(res.tzname) + + # Check for something like GMT+3, or BRST+3. Notice + # that it doesn't mean "I am 3 hours after GMT", but + # "my time +3 is GMT". If found, we reverse the + # logic so that timezone parsing code will get it + # right. + if i + 1 < len_l and l[i + 1] in ('+', '-'): + l[i + 1] = ('+', '-')[l[i + 1] == '+'] + res.tzoffset = None + if info.utczone(res.tzname): + # With something like GMT+3, the timezone + # is *not* GMT. + res.tzname = None + + # Check for a numbered timezone + elif res.hour is not None and l[i] in ('+', '-'): + signal = (-1, 1)[l[i] == '+'] + len_li = len(l[i + 1]) + + # TODO: check that l[i + 1] is integer? + if len_li == 4: + # -0300 + hour_offset = int(l[i + 1][:2]) + min_offset = int(l[i + 1][2:]) + elif i + 2 < len_l and l[i + 2] == ':': + # -03:00 + hour_offset = int(l[i + 1]) + min_offset = int(l[i + 3]) # TODO: Check that l[i+3] is minute-like? 
+ i += 2 + elif len_li <= 2: + # -[0]3 + hour_offset = int(l[i + 1][:2]) + min_offset = 0 + else: + raise ValueError(timestr) + + res.tzoffset = signal * (hour_offset * 3600 + min_offset * 60) + + # Look for a timezone name between parenthesis + if (i + 5 < len_l and + info.jump(l[i + 2]) and l[i + 3] == '(' and + l[i + 5] == ')' and + 3 <= len(l[i + 4]) and + self._could_be_tzname(res.hour, res.tzname, + None, l[i + 4])): + # -0300 (BRST) + res.tzname = l[i + 4] + i += 4 + + i += 1 + + # Check jumps + elif not (info.jump(l[i]) or fuzzy): + raise ValueError(timestr) + + else: + skipped_idxs.append(i) + i += 1 + + # Process year/month/day + year, month, day = ymd.resolve_ymd(yearfirst, dayfirst) + + res.century_specified = ymd.century_specified + res.year = year + res.month = month + res.day = day + + except (IndexError, ValueError): + return None, None + + if not info.validate(res): + return None, None + + if fuzzy_with_tokens: + skipped_tokens = self._recombine_skipped(l, skipped_idxs) + return res, tuple(skipped_tokens) + else: + return res, None + + def _parse_numeric_token(self, tokens, idx, info, ymd, res, fuzzy): + # Token is a number + value_repr = tokens[idx] + try: + value = self._to_decimal(value_repr) + except Exception as e: + six.raise_from(ValueError('Unknown numeric token'), e) + + len_li = len(value_repr) + + len_l = len(tokens) + + if (len(ymd) == 3 and len_li in (2, 4) and + res.hour is None and + (idx + 1 >= len_l or + (tokens[idx + 1] != ':' and + info.hms(tokens[idx + 1]) is None))): + # 19990101T23[59] + s = tokens[idx] + res.hour = int(s[:2]) + + if len_li == 4: + res.minute = int(s[2:]) + + elif len_li == 6 or (len_li > 6 and tokens[idx].find('.') == 6): + # YYMMDD or HHMMSS[.ss] + s = tokens[idx] + + if not ymd and '.' not in tokens[idx]: + ymd.append(s[:2]) + ymd.append(s[2:4]) + ymd.append(s[4:]) + else: + # 19990101T235959[.59] + + # TODO: Check if res attributes already set. + res.hour = int(s[:2]) + res.minute = int(s[2:4]) + res.second, res.microsecond = self._parsems(s[4:]) + + elif len_li in (8, 12, 14): + # YYYYMMDD + s = tokens[idx] + ymd.append(s[:4], 'Y') + ymd.append(s[4:6]) + ymd.append(s[6:8]) + + if len_li > 8: + res.hour = int(s[8:10]) + res.minute = int(s[10:12]) + + if len_li > 12: + res.second = int(s[12:]) + + elif self._find_hms_idx(idx, tokens, info, allow_jump=True) is not None: + # HH[ ]h or MM[ ]m or SS[.ss][ ]s + hms_idx = self._find_hms_idx(idx, tokens, info, allow_jump=True) + (idx, hms) = self._parse_hms(idx, tokens, info, hms_idx) + if hms is not None: + # TODO: checking that hour/minute/second are not + # already set? + self._assign_hms(res, value_repr, hms) + + elif idx + 2 < len_l and tokens[idx + 1] == ':': + # HH:MM[:SS[.ss]] + res.hour = int(value) + value = self._to_decimal(tokens[idx + 2]) # TODO: try/except for this? 
+ (res.minute, res.second) = self._parse_min_sec(value) + + if idx + 4 < len_l and tokens[idx + 3] == ':': + res.second, res.microsecond = self._parsems(tokens[idx + 4]) + + idx += 2 + + idx += 2 + + elif idx + 1 < len_l and tokens[idx + 1] in ('-', '/', '.'): + sep = tokens[idx + 1] + ymd.append(value_repr) + + if idx + 2 < len_l and not info.jump(tokens[idx + 2]): + if tokens[idx + 2].isdigit(): + # 01-01[-01] + ymd.append(tokens[idx + 2]) + else: + # 01-Jan[-01] + value = info.month(tokens[idx + 2]) + + if value is not None: + ymd.append(value, 'M') + else: + raise ValueError() + + if idx + 3 < len_l and tokens[idx + 3] == sep: + # We have three members + value = info.month(tokens[idx + 4]) + + if value is not None: + ymd.append(value, 'M') + else: + ymd.append(tokens[idx + 4]) + idx += 2 + + idx += 1 + idx += 1 + + elif idx + 1 >= len_l or info.jump(tokens[idx + 1]): + if idx + 2 < len_l and info.ampm(tokens[idx + 2]) is not None: + # 12 am + hour = int(value) + res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 2])) + idx += 1 + else: + # Year, month or day + ymd.append(value) + idx += 1 + + elif info.ampm(tokens[idx + 1]) is not None and (0 <= value < 24): + # 12am + hour = int(value) + res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 1])) + idx += 1 + + elif ymd.could_be_day(value): + ymd.append(value) + + elif not fuzzy: + raise ValueError() + + return idx + + def _find_hms_idx(self, idx, tokens, info, allow_jump): + len_l = len(tokens) + + if idx+1 < len_l and info.hms(tokens[idx+1]) is not None: + # There is an "h", "m", or "s" label following this token. We take + # assign the upcoming label to the current token. + # e.g. the "12" in 12h" + hms_idx = idx + 1 + + elif (allow_jump and idx+2 < len_l and tokens[idx+1] == ' ' and + info.hms(tokens[idx+2]) is not None): + # There is a space and then an "h", "m", or "s" label. + # e.g. the "12" in "12 h" + hms_idx = idx + 2 + + elif idx > 0 and info.hms(tokens[idx-1]) is not None: + # There is a "h", "m", or "s" preceeding this token. Since neither + # of the previous cases was hit, there is no label following this + # token, so we use the previous label. + # e.g. the "04" in "12h04" + hms_idx = idx-1 + + elif (1 < idx == len_l-1 and tokens[idx-1] == ' ' and + info.hms(tokens[idx-2]) is not None): + # If we are looking at the final token, we allow for a + # backward-looking check to skip over a space. + # TODO: Are we sure this is the right condition here? + hms_idx = idx - 2 + + else: + hms_idx = None + + return hms_idx + + def _assign_hms(self, res, value_repr, hms): + # See GH issue #427, fixing float rounding + value = self._to_decimal(value_repr) + + if hms == 0: + # Hour + res.hour = int(value) + if value % 1: + res.minute = int(60*(value % 1)) + + elif hms == 1: + (res.minute, res.second) = self._parse_min_sec(value) + + elif hms == 2: + (res.second, res.microsecond) = self._parsems(value_repr) + + def _could_be_tzname(self, hour, tzname, tzoffset, token): + return (hour is not None and + tzname is None and + tzoffset is None and + len(token) <= 5 and + all(x in string.ascii_uppercase for x in token)) + + def _ampm_valid(self, hour, ampm, fuzzy): + """ + For fuzzy parsing, 'a' or 'am' (both valid English words) + may erroneously trigger the AM/PM flag. Deal with that + here. + """ + val_is_ampm = True + + # If there's already an AM/PM flag, this one isn't one. 
+ if fuzzy and ampm is not None: + val_is_ampm = False + + # If AM/PM is found and hour is not, raise a ValueError + if hour is None: + if fuzzy: + val_is_ampm = False + else: + raise ValueError('No hour specified with AM or PM flag.') + elif not 0 <= hour <= 12: + # If AM/PM is found, it's a 12 hour clock, so raise + # an error for invalid range + if fuzzy: + val_is_ampm = False + else: + raise ValueError('Invalid hour specified for 12-hour clock.') + + return val_is_ampm + + def _adjust_ampm(self, hour, ampm): + if hour < 12 and ampm == 1: + hour += 12 + elif hour == 12 and ampm == 0: + hour = 0 + return hour + + def _parse_min_sec(self, value): + # TODO: Every usage of this function sets res.second to the return + # value. Are there any cases where second will be returned as None and + # we *dont* want to set res.second = None? + minute = int(value) + second = None + + sec_remainder = value % 1 + if sec_remainder: + second = int(60 * sec_remainder) + return (minute, second) + + def _parsems(self, value): + """Parse a I[.F] seconds value into (seconds, microseconds).""" + if "." not in value: + return int(value), 0 + else: + i, f = value.split(".") + return int(i), int(f.ljust(6, "0")[:6]) + + def _parse_hms(self, idx, tokens, info, hms_idx): + # TODO: Is this going to admit a lot of false-positives for when we + # just happen to have digits and "h", "m" or "s" characters in non-date + # text? I guess hex hashes won't have that problem, but there's plenty + # of random junk out there. + if hms_idx is None: + hms = None + new_idx = idx + elif hms_idx > idx: + hms = info.hms(tokens[hms_idx]) + new_idx = hms_idx + else: + # Looking backwards, increment one. + hms = info.hms(tokens[hms_idx]) + 1 + new_idx = idx + + return (new_idx, hms) + + def _recombine_skipped(self, tokens, skipped_idxs): + """ + >>> tokens = ["foo", " ", "bar", " ", "19June2000", "baz"] + >>> skipped_idxs = [0, 1, 2, 5] + >>> _recombine_skipped(tokens, skipped_idxs) + ["foo bar", "baz"] + """ + skipped_tokens = [] + for i, idx in enumerate(sorted(skipped_idxs)): + if i > 0 and idx - 1 == skipped_idxs[i - 1]: + skipped_tokens[-1] = skipped_tokens[-1] + tokens[idx] + else: + skipped_tokens.append(tokens[idx]) + + return skipped_tokens + + def _build_tzinfo(self, tzinfos, tzname, tzoffset): + if callable(tzinfos): + tzdata = tzinfos(tzname, tzoffset) + else: + tzdata = tzinfos.get(tzname) + # handle case where tzinfo is paased an options that returns None + # eg tzinfos = {'BRST' : None} + if isinstance(tzdata, datetime.tzinfo) or tzdata is None: + tzinfo = tzdata + elif isinstance(tzdata, text_type): + tzinfo = tz.tzstr(tzdata) + elif isinstance(tzdata, integer_types): + tzinfo = tz.tzoffset(tzname, tzdata) + return tzinfo + + def _build_tzaware(self, naive, res, tzinfos): + if (callable(tzinfos) or (tzinfos and res.tzname in tzinfos)): + tzinfo = self._build_tzinfo(tzinfos, res.tzname, res.tzoffset) + aware = naive.replace(tzinfo=tzinfo) + aware = self._assign_tzname(aware, res.tzname) + + elif res.tzname and res.tzname in time.tzname: + aware = naive.replace(tzinfo=tz.tzlocal()) + + # Handle ambiguous local datetime + aware = self._assign_tzname(aware, res.tzname) + + # This is mostly relevant for winter GMT zones parsed in the UK + if (aware.tzname() != res.tzname and + res.tzname in self.info.UTCZONE): + aware = aware.replace(tzinfo=tz.tzutc()) + + elif res.tzoffset == 0: + aware = naive.replace(tzinfo=tz.tzutc()) + + elif res.tzoffset: + aware = naive.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset)) + + elif not 
res.tzname and not res.tzoffset: + # i.e. no timezone information was found. + aware = naive + + elif res.tzname: + # tz-like string was parsed but we don't know what to do + # with it + warnings.warn("tzname {tzname} identified but not understood. " + "Pass `tzinfos` argument in order to correctly " + "return a timezone-aware datetime. In a future " + "version, this will raise an " + "exception.".format(tzname=res.tzname), + category=UnknownTimezoneWarning) + aware = naive + + return aware + + def _build_naive(self, res, default): + repl = {} + for attr in ("year", "month", "day", "hour", + "minute", "second", "microsecond"): + value = getattr(res, attr) + if value is not None: + repl[attr] = value + + if 'day' not in repl: + # If the default day exceeds the last day of the month, fall back + # to the end of the month. + cyear = default.year if res.year is None else res.year + cmonth = default.month if res.month is None else res.month + cday = default.day if res.day is None else res.day + + if cday > monthrange(cyear, cmonth)[1]: + repl['day'] = monthrange(cyear, cmonth)[1] + + naive = default.replace(**repl) + + if res.weekday is not None and not res.day: + naive = naive + relativedelta.relativedelta(weekday=res.weekday) + + return naive + + def _assign_tzname(self, dt, tzname): + if dt.tzname() != tzname: + new_dt = tz.enfold(dt, fold=1) + if new_dt.tzname() == tzname: + return new_dt + + return dt + + def _to_decimal(self, val): + try: + decimal_value = Decimal(val) + # See GH 662, edge case, infinite value should not be converted via `_to_decimal` + if not decimal_value.is_finite(): + raise ValueError("Converted decimal value is infinite or NaN") + except Exception as e: + msg = "Could not convert %s to decimal" % val + six.raise_from(ValueError(msg), e) + else: + return decimal_value + + +DEFAULTPARSER = parser() + + +def parse(timestr, parserinfo=None, **kwargs): + """ + + Parse a string in one of the supported formats, using the + ``parserinfo`` parameters. + + :param timestr: + A string containing a date/time stamp. + + :param parserinfo: + A :class:`parserinfo` object containing parameters for the parser. + If ``None``, the default arguments to the :class:`parserinfo` + constructor are used. + + The ``**kwargs`` parameter takes the following keyword arguments: + + :param default: + The default datetime object, if this is a datetime object and not + ``None``, elements specified in ``timestr`` replace elements in the + default object. + + :param ignoretz: + If set ``True``, time zones in parsed strings are ignored and a naive + :class:`datetime` object is returned. + + :param tzinfos: + Additional time zone names / aliases which may be present in the + string. This argument maps time zone names (and optionally offsets + from those time zones) to time zones. This parameter can be a + dictionary with timezone aliases mapping time zone names to time + zones or a function taking two parameters (``tzname`` and + ``tzoffset``) and returning a time zone. + + The timezones to which the names are mapped can be an integer + offset from UTC in seconds or a :class:`tzinfo` object. + + .. 
doctest:: + :options: +NORMALIZE_WHITESPACE + + >>> from dateutil.parser import parse + >>> from dateutil.tz import gettz + >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")} + >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200)) + >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos) + datetime.datetime(2012, 1, 19, 17, 21, + tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago')) + + This parameter is ignored if ``ignoretz`` is set. + + :param dayfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the day (``True``) or month (``False``). If + ``yearfirst`` is set to ``True``, this distinguishes between YDM and + YMD. If set to ``None``, this value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). + + :param yearfirst: + Whether to interpret the first value in an ambiguous 3-integer date + (e.g. 01/05/09) as the year. If ``True``, the first number is taken to + be the year, otherwise the last number is taken to be the year. If + this is set to ``None``, the value is retrieved from the current + :class:`parserinfo` object (which itself defaults to ``False``). + + :param fuzzy: + Whether to allow fuzzy parsing, allowing for string like "Today is + January 1, 2047 at 8:21:00AM". + + :param fuzzy_with_tokens: + If ``True``, ``fuzzy`` is automatically set to True, and the parser + will return a tuple where the first element is the parsed + :class:`datetime.datetime` datetimestamp and the second element is + a tuple containing the portions of the string which were ignored: + + .. doctest:: + + >>> from dateutil.parser import parse + >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True) + (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) + + :return: + Returns a :class:`datetime.datetime` object or, if the + ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the + first element being a :class:`datetime.datetime` object, the second + a tuple containing the fuzzy tokens. + + :raises ValueError: + Raised for invalid or unknown string format, if the provided + :class:`tzinfo` is not in a valid format, or if an invalid date + would be created. + + :raises OverflowError: + Raised if the parsed date exceeds the largest valid C integer on + your system. 
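 Two further quick examples (the strings are arbitrary illustrations):
 
 >>> from dateutil.parser import parse
 >>> parse("Thu Sep 25 10:36:28 2003")
 datetime.datetime(2003, 9, 25, 10, 36, 28)
 >>> parse("Sat Oct 11 17:13:46 UTC 2003")
 datetime.datetime(2003, 10, 11, 17, 13, 46, tzinfo=tzutc())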
+ """ + if parserinfo: + return parser(parserinfo).parse(timestr, **kwargs) + else: + return DEFAULTPARSER.parse(timestr, **kwargs) + + +class _tzparser(object): + + class _result(_resultbase): + + __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset", + "start", "end"] + + class _attr(_resultbase): + __slots__ = ["month", "week", "weekday", + "yday", "jyday", "day", "time"] + + def __repr__(self): + return self._repr("") + + def __init__(self): + _resultbase.__init__(self) + self.start = self._attr() + self.end = self._attr() + + def parse(self, tzstr): + res = self._result() + l = [x for x in re.split(r'([,:.]|[a-zA-Z]+|[0-9]+)',tzstr) if x] + used_idxs = list() + try: + + len_l = len(l) + + i = 0 + while i < len_l: + # BRST+3[BRDT[+2]] + j = i + while j < len_l and not [x for x in l[j] + if x in "0123456789:,-+"]: + j += 1 + if j != i: + if not res.stdabbr: + offattr = "stdoffset" + res.stdabbr = "".join(l[i:j]) + else: + offattr = "dstoffset" + res.dstabbr = "".join(l[i:j]) + + for ii in range(j): + used_idxs.append(ii) + i = j + if (i < len_l and (l[i] in ('+', '-') or l[i][0] in + "0123456789")): + if l[i] in ('+', '-'): + # Yes, that's right. See the TZ variable + # documentation. + signal = (1, -1)[l[i] == '+'] + used_idxs.append(i) + i += 1 + else: + signal = -1 + len_li = len(l[i]) + if len_li == 4: + # -0300 + setattr(res, offattr, (int(l[i][:2]) * 3600 + + int(l[i][2:]) * 60) * signal) + elif i + 1 < len_l and l[i + 1] == ':': + # -03:00 + setattr(res, offattr, + (int(l[i]) * 3600 + + int(l[i + 2]) * 60) * signal) + used_idxs.append(i) + i += 2 + elif len_li <= 2: + # -[0]3 + setattr(res, offattr, + int(l[i][:2]) * 3600 * signal) + else: + return None + used_idxs.append(i) + i += 1 + if res.dstabbr: + break + else: + break + + + if i < len_l: + for j in range(i, len_l): + if l[j] == ';': + l[j] = ',' + + assert l[i] == ',' + + i += 1 + + if i >= len_l: + pass + elif (8 <= l.count(',') <= 9 and + not [y for x in l[i:] if x != ',' + for y in x if y not in "0123456789+-"]): + # GMT0BST,3,0,30,3600,10,0,26,7200[,3600] + for x in (res.start, res.end): + x.month = int(l[i]) + used_idxs.append(i) + i += 2 + if l[i] == '-': + value = int(l[i + 1]) * -1 + used_idxs.append(i) + i += 1 + else: + value = int(l[i]) + used_idxs.append(i) + i += 2 + if value: + x.week = value + x.weekday = (int(l[i]) - 1) % 7 + else: + x.day = int(l[i]) + used_idxs.append(i) + i += 2 + x.time = int(l[i]) + used_idxs.append(i) + i += 2 + if i < len_l: + if l[i] in ('-', '+'): + signal = (-1, 1)[l[i] == "+"] + used_idxs.append(i) + i += 1 + else: + signal = 1 + used_idxs.append(i) + res.dstoffset = (res.stdoffset + int(l[i]) * signal) + + # This was a made-up format that is not in normal use + warn(('Parsed time zone "%s"' % tzstr) + + 'is in a non-standard dateutil-specific format, which ' + + 'is now deprecated; support for parsing this format ' + + 'will be removed in future versions. 
It is recommended ' + + 'that you switch to a standard format like the GNU ' + + 'TZ variable format.', tz.DeprecatedTzFormatWarning) + elif (l.count(',') == 2 and l[i:].count('/') <= 2 and + not [y for x in l[i:] if x not in (',', '/', 'J', 'M', + '.', '-', ':') + for y in x if y not in "0123456789"]): + for x in (res.start, res.end): + if l[i] == 'J': + # non-leap year day (1 based) + used_idxs.append(i) + i += 1 + x.jyday = int(l[i]) + elif l[i] == 'M': + # month[-.]week[-.]weekday + used_idxs.append(i) + i += 1 + x.month = int(l[i]) + used_idxs.append(i) + i += 1 + assert l[i] in ('-', '.') + used_idxs.append(i) + i += 1 + x.week = int(l[i]) + if x.week == 5: + x.week = -1 + used_idxs.append(i) + i += 1 + assert l[i] in ('-', '.') + used_idxs.append(i) + i += 1 + x.weekday = (int(l[i]) - 1) % 7 + else: + # year day (zero based) + x.yday = int(l[i]) + 1 + + used_idxs.append(i) + i += 1 + + if i < len_l and l[i] == '/': + used_idxs.append(i) + i += 1 + # start time + len_li = len(l[i]) + if len_li == 4: + # -0300 + x.time = (int(l[i][:2]) * 3600 + + int(l[i][2:]) * 60) + elif i + 1 < len_l and l[i + 1] == ':': + # -03:00 + x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60 + used_idxs.append(i) + i += 2 + if i + 1 < len_l and l[i + 1] == ':': + used_idxs.append(i) + i += 2 + x.time += int(l[i]) + elif len_li <= 2: + # -[0]3 + x.time = (int(l[i][:2]) * 3600) + else: + return None + used_idxs.append(i) + i += 1 + + assert i == len_l or l[i] == ',' + + i += 1 + + assert i >= len_l + + except (IndexError, ValueError, AssertionError): + return None + + unused_idxs = set(range(len_l)).difference(used_idxs) + res.any_unused_tokens = not {l[n] for n in unused_idxs}.issubset({",",":"}) + return res + + +DEFAULTTZPARSER = _tzparser() + + +def _parsetz(tzstr): + return DEFAULTTZPARSER.parse(tzstr) + +class UnknownTimezoneWarning(RuntimeWarning): + """Raised when the parser finds a timezone it cannot parse into a tzinfo""" +# vim:ts=4:sw=4:et diff --git a/thesisenv/lib/python3.6/site-packages/dateutil/parser/isoparser.py b/thesisenv/lib/python3.6/site-packages/dateutil/parser/isoparser.py new file mode 100644 index 0000000..cd27f93 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/dateutil/parser/isoparser.py @@ -0,0 +1,406 @@ +# -*- coding: utf-8 -*- +""" +This module offers a parser for ISO-8601 strings + +It is intended to support all valid date, time and datetime formats per the +ISO-8601 specification. + +..versionadded:: 2.7.0 +""" +from datetime import datetime, timedelta, time, date +import calendar +from dateutil import tz + +from functools import wraps + +import re +import six + +__all__ = ["isoparse", "isoparser"] + + +def _takes_ascii(f): + @wraps(f) + def func(self, str_in, *args, **kwargs): + # If it's a stream, read the whole thing + str_in = getattr(str_in, 'read', lambda: str_in)() + + # If it's unicode, turn it into bytes, since ISO-8601 only covers ASCII + if isinstance(str_in, six.text_type): + # ASCII is the same in UTF-8 + try: + str_in = str_in.encode('ascii') + except UnicodeEncodeError as e: + msg = 'ISO-8601 strings should contain only ASCII characters' + six.raise_from(ValueError(msg), e) + + return f(self, str_in, *args, **kwargs) + + return func + + +class isoparser(object): + def __init__(self, sep=None): + """ + :param sep: + A single character that separates date and time portions. If + ``None``, the parser will accept any single character. + For strict ISO-8601 adherence, pass ``'T'``. 
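# A minimal usage sketch of the separator handling described above, assuming the
# isoparse helper and isoparser class exported by this vendored dateutil:
from dateutil.parser import isoparse, isoparser

isoparse("2018-10-22T18:58:10")                     # default: any single-char separator
isoparse("2018-10-22 18:58:10")                     # a space separator is also accepted
isoparser(sep="T").isoparse("2018-10-22T18:58:10")  # strict ISO-8601 'T' separator only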
+ """ + if sep is not None: + if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'): + raise ValueError('Separator must be a single, non-numeric ' + + 'ASCII character') + + sep = sep.encode('ascii') + + self._sep = sep + + @_takes_ascii + def isoparse(self, dt_str): + """ + Parse an ISO-8601 datetime string into a :class:`datetime.datetime`. + + An ISO-8601 datetime string consists of a date portion, followed + optionally by a time portion - the date and time portions are separated + by a single character separator, which is ``T`` in the official + standard. Incomplete date formats (such as ``YYYY-MM``) may *not* be + combined with a time portion. + + Supported date formats are: + + Common: + + - ``YYYY`` + - ``YYYY-MM`` or ``YYYYMM`` + - ``YYYY-MM-DD`` or ``YYYYMMDD`` + + Uncommon: + + - ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 0) + - ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day + + The ISO week and day numbering follows the same logic as + :func:`datetime.date.isocalendar`. + + Supported time formats are: + + - ``hh`` + - ``hh:mm`` or ``hhmm`` + - ``hh:mm:ss`` or ``hhmmss`` + - ``hh:mm:ss.sss`` or ``hh:mm:ss.ssssss`` (3-6 sub-second digits) + + Midnight is a special case for `hh`, as the standard supports both + 00:00 and 24:00 as a representation. + + .. caution:: + + Support for fractional components other than seconds is part of the + ISO-8601 standard, but is not currently implemented in this parser. + + Supported time zone offset formats are: + + - `Z` (UTC) + - `±HH:MM` + - `±HHMM` + - `±HH` + + Offsets will be represented as :class:`dateutil.tz.tzoffset` objects, + with the exception of UTC, which will be represented as + :class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such + as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`. + + :param dt_str: + A string or stream containing only an ISO-8601 datetime string + + :return: + Returns a :class:`datetime.datetime` representing the string. + Unspecified components default to their lowest value. + + .. warning:: + + As of version 2.7.0, the strictness of the parser should not be + considered a stable part of the contract. Any valid ISO-8601 string + that parses correctly with the default settings will continue to + parse correctly in future versions, but invalid strings that + currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not + guaranteed to continue failing in future versions if they encode + a valid date. + + .. versionadded:: 2.7.0 + """ + components, pos = self._parse_isodate(dt_str) + + if len(dt_str) > pos: + if self._sep is None or dt_str[pos:pos + 1] == self._sep: + components += self._parse_isotime(dt_str[pos + 1:]) + else: + raise ValueError('String contains unknown ISO components') + + return datetime(*components) + + @_takes_ascii + def parse_isodate(self, datestr): + """ + Parse the date portion of an ISO string. + + :param datestr: + The string portion of an ISO string, without a separator + + :return: + Returns a :class:`datetime.date` object + """ + components, pos = self._parse_isodate(datestr) + if pos < len(datestr): + raise ValueError('String contains unknown ISO ' + + 'components: {}'.format(datestr)) + return date(*components) + + @_takes_ascii + def parse_isotime(self, timestr): + """ + Parse the time portion of an ISO string. 
+ + :param timestr: + The time portion of an ISO string, without a separator + + :return: + Returns a :class:`datetime.time` object + """ + return time(*self._parse_isotime(timestr)) + + @_takes_ascii + def parse_tzstr(self, tzstr, zero_as_utc=True): + """ + Parse a valid ISO time zone string. + + See :func:`isoparser.isoparse` for details on supported formats. + + :param tzstr: + A string representing an ISO time zone offset + + :param zero_as_utc: + Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones + + :return: + Returns :class:`dateutil.tz.tzoffset` for offsets and + :class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is + specified) offsets equivalent to UTC. + """ + return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc) + + # Constants + _MICROSECOND_END_REGEX = re.compile(b'[-+Z]+') + _DATE_SEP = b'-' + _TIME_SEP = b':' + _MICRO_SEP = b'.' + + def _parse_isodate(self, dt_str): + try: + return self._parse_isodate_common(dt_str) + except ValueError: + return self._parse_isodate_uncommon(dt_str) + + def _parse_isodate_common(self, dt_str): + len_str = len(dt_str) + components = [1, 1, 1] + + if len_str < 4: + raise ValueError('ISO string too short') + + # Year + components[0] = int(dt_str[0:4]) + pos = 4 + if pos >= len_str: + return components, pos + + has_sep = dt_str[pos:pos + 1] == self._DATE_SEP + if has_sep: + pos += 1 + + # Month + if len_str - pos < 2: + raise ValueError('Invalid common month') + + components[1] = int(dt_str[pos:pos + 2]) + pos += 2 + + if pos >= len_str: + if has_sep: + return components, pos + else: + raise ValueError('Invalid ISO format') + + if has_sep: + if dt_str[pos:pos + 1] != self._DATE_SEP: + raise ValueError('Invalid separator in ISO string') + pos += 1 + + # Day + if len_str - pos < 2: + raise ValueError('Invalid common day') + components[2] = int(dt_str[pos:pos + 2]) + return components, pos + 2 + + def _parse_isodate_uncommon(self, dt_str): + if len(dt_str) < 4: + raise ValueError('ISO string too short') + + # All ISO formats start with the year + year = int(dt_str[0:4]) + + has_sep = dt_str[4:5] == self._DATE_SEP + + pos = 4 + has_sep # Skip '-' if it's there + if dt_str[pos:pos + 1] == b'W': + # YYYY-?Www-?D? + pos += 1 + weekno = int(dt_str[pos:pos + 2]) + pos += 2 + + dayno = 1 + if len(dt_str) > pos: + if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep: + raise ValueError('Inconsistent use of dash separator') + + pos += has_sep + + dayno = int(dt_str[pos:pos + 1]) + pos += 1 + + base_date = self._calculate_weekdate(year, weekno, dayno) + else: + # YYYYDDD or YYYY-DDD + if len(dt_str) - pos < 3: + raise ValueError('Invalid ordinal day') + + ordinal_day = int(dt_str[pos:pos + 3]) + pos += 3 + + if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)): + raise ValueError('Invalid ordinal day' + + ' {} for year {}'.format(ordinal_day, year)) + + base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1) + + components = [base_date.year, base_date.month, base_date.day] + return components, pos + + def _calculate_weekdate(self, year, week, day): + """ + Calculate the day of corresponding to the ISO year-week-day calendar. + + This function is effectively the inverse of + :func:`datetime.date.isocalendar`. 
+ + :param year: + The year in the ISO calendar + + :param week: + The week in the ISO calendar - range is [1, 53] + + :param day: + The day in the ISO calendar - range is [1 (MON), 7 (SUN)] + + :return: + Returns a :class:`datetime.date` + """ + if not 0 < week < 54: + raise ValueError('Invalid week: {}'.format(week)) + + if not 0 < day < 8: # Range is 1-7 + raise ValueError('Invalid weekday: {}'.format(day)) + + # Get week 1 for the specific year: + jan_4 = date(year, 1, 4) # Week 1 always has January 4th in it + week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1) + + # Now add the specific number of weeks and days to get what we want + week_offset = (week - 1) * 7 + (day - 1) + return week_1 + timedelta(days=week_offset) + + def _parse_isotime(self, timestr): + len_str = len(timestr) + components = [0, 0, 0, 0, None] + pos = 0 + comp = -1 + + if len(timestr) < 2: + raise ValueError('ISO time too short') + + has_sep = len_str >= 3 and timestr[2:3] == self._TIME_SEP + + while pos < len_str and comp < 5: + comp += 1 + + if timestr[pos:pos + 1] in b'-+Z': + # Detect time zone boundary + components[-1] = self._parse_tzstr(timestr[pos:]) + pos = len_str + break + + if comp < 3: + # Hour, minute, second + components[comp] = int(timestr[pos:pos + 2]) + pos += 2 + if (has_sep and pos < len_str and + timestr[pos:pos + 1] == self._TIME_SEP): + pos += 1 + + if comp == 3: + # Microsecond + if timestr[pos:pos + 1] != self._MICRO_SEP: + continue + + pos += 1 + us_str = self._MICROSECOND_END_REGEX.split(timestr[pos:pos + 6], + 1)[0] + + components[comp] = int(us_str) * 10**(6 - len(us_str)) + pos += len(us_str) + + if pos < len_str: + raise ValueError('Unused components in ISO string') + + if components[0] == 24: + # Standard supports 00:00 and 24:00 as representations of midnight + if any(component != 0 for component in components[1:4]): + raise ValueError('Hour may only be 24 at 24:00:00.000') + components[0] = 0 + + return components + + def _parse_tzstr(self, tzstr, zero_as_utc=True): + if tzstr == b'Z': + return tz.tzutc() + + if len(tzstr) not in {3, 5, 6}: + raise ValueError('Time zone offset must be 1, 3, 5 or 6 characters') + + if tzstr[0:1] == b'-': + mult = -1 + elif tzstr[0:1] == b'+': + mult = 1 + else: + raise ValueError('Time zone offset requires sign') + + hours = int(tzstr[1:3]) + if len(tzstr) == 3: + minutes = 0 + else: + minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP else 3):]) + + if zero_as_utc and hours == 0 and minutes == 0: + return tz.tzutc() + else: + if minutes > 59: + raise ValueError('Invalid minutes in time zone offset') + + if hours > 23: + raise ValueError('Invalid hours in time zone offset') + + return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60) + + +DEFAULT_ISOPARSER = isoparser() +isoparse = DEFAULT_ISOPARSER.isoparse diff --git a/thesisenv/lib/python3.6/site-packages/dateutil/relativedelta.py b/thesisenv/lib/python3.6/site-packages/dateutil/relativedelta.py new file mode 100644 index 0000000..1e0d616 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/dateutil/relativedelta.py @@ -0,0 +1,590 @@ +# -*- coding: utf-8 -*- +import datetime +import calendar + +import operator +from math import copysign + +from six import integer_types +from warnings import warn + +from ._common import weekday + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) + +__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + + +class relativedelta(object): + """ + The relativedelta type is based on the specification of 
the excellent + work done by M.-A. Lemburg in his + `mx.DateTime `_ extension. + However, notice that this type does *NOT* implement the same algorithm as + his work. Do *NOT* expect it to behave like mx.DateTime's counterpart. + + There are two different ways to build a relativedelta instance. The + first one is passing it two date/datetime classes:: + + relativedelta(datetime1, datetime2) + + The second one is passing it any number of the following keyword arguments:: + + relativedelta(arg1=x,arg2=y,arg3=z...) + + year, month, day, hour, minute, second, microsecond: + Absolute information (argument is singular); adding or subtracting a + relativedelta with absolute information does not perform an arithmetic + operation, but rather REPLACES the corresponding value in the + original datetime with the value(s) in relativedelta. + + years, months, weeks, days, hours, minutes, seconds, microseconds: + Relative information, may be negative (argument is plural); adding + or subtracting a relativedelta with relative information performs + the corresponding aritmetic operation on the original datetime value + with the information in the relativedelta. + + weekday: + One of the weekday instances (MO, TU, etc). These + instances may receive a parameter N, specifying the Nth + weekday, which could be positive or negative (like MO(+1) + or MO(-2). Not specifying it is the same as specifying + +1. You can also use an integer, where 0=MO. Notice that + if the calculated date is already Monday, for example, + using MO(1) or MO(-1) won't change the day. + + leapdays: + Will add given days to the date found, if year is a leap + year, and the date found is post 28 of february. + + yearday, nlyearday: + Set the yearday or the non-leap year day (jump leap days). + These are converted to day/month/leapdays information. + + There are relative and absolute forms of the keyword + arguments. The plural is relative, and the singular is + absolute. For each argument in the order below, the absolute form + is applied first (by setting each attribute to that value) and + then the relative form (by adding the value to the attribute). + + The order of attributes considered when this relativedelta is + added to a datetime is: + + 1. Year + 2. Month + 3. Day + 4. Hours + 5. Minutes + 6. Seconds + 7. Microseconds + + Finally, weekday is applied, using the rule described above. + + For example + + >>> dt = datetime(2018, 4, 9, 13, 37, 0) + >>> delta = relativedelta(hours=25, day=1, weekday=MO(1)) + datetime(2018, 4, 2, 14, 37, 0) + + First, the day is set to 1 (the first of the month), then 25 hours + are added, to get to the 2nd day and 14th hour, finally the + weekday is applied, but since the 2nd is already a Monday there is + no effect. + + """ + + def __init__(self, dt1=None, dt2=None, + years=0, months=0, days=0, leapdays=0, weeks=0, + hours=0, minutes=0, seconds=0, microseconds=0, + year=None, month=None, day=None, weekday=None, + yearday=None, nlyearday=None, + hour=None, minute=None, second=None, microsecond=None): + + if dt1 and dt2: + # datetime is a subclass of date. 
So both must be date + if not (isinstance(dt1, datetime.date) and + isinstance(dt2, datetime.date)): + raise TypeError("relativedelta only diffs datetime/date") + + # We allow two dates, or two datetimes, so we coerce them to be + # of the same type + if (isinstance(dt1, datetime.datetime) != + isinstance(dt2, datetime.datetime)): + if not isinstance(dt1, datetime.datetime): + dt1 = datetime.datetime.fromordinal(dt1.toordinal()) + elif not isinstance(dt2, datetime.datetime): + dt2 = datetime.datetime.fromordinal(dt2.toordinal()) + + self.years = 0 + self.months = 0 + self.days = 0 + self.leapdays = 0 + self.hours = 0 + self.minutes = 0 + self.seconds = 0 + self.microseconds = 0 + self.year = None + self.month = None + self.day = None + self.weekday = None + self.hour = None + self.minute = None + self.second = None + self.microsecond = None + self._has_time = 0 + + # Get year / month delta between the two + months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month) + self._set_months(months) + + # Remove the year/month delta so the timedelta is just well-defined + # time units (seconds, days and microseconds) + dtm = self.__radd__(dt2) + + # If we've overshot our target, make an adjustment + if dt1 < dt2: + compare = operator.gt + increment = 1 + else: + compare = operator.lt + increment = -1 + + while compare(dt1, dtm): + months += increment + self._set_months(months) + dtm = self.__radd__(dt2) + + # Get the timedelta between the "months-adjusted" date and dt1 + delta = dt1 - dtm + self.seconds = delta.seconds + delta.days * 86400 + self.microseconds = delta.microseconds + else: + # Check for non-integer values in integer-only quantities + if any(x is not None and x != int(x) for x in (years, months)): + raise ValueError("Non-integer years and months are " + "ambiguous and not currently supported.") + + # Relative information + self.years = int(years) + self.months = int(months) + self.days = days + weeks * 7 + self.leapdays = leapdays + self.hours = hours + self.minutes = minutes + self.seconds = seconds + self.microseconds = microseconds + + # Absolute information + self.year = year + self.month = month + self.day = day + self.hour = hour + self.minute = minute + self.second = second + self.microsecond = microsecond + + if any(x is not None and int(x) != x + for x in (year, month, day, hour, + minute, second, microsecond)): + # For now we'll deprecate floats - later it'll be an error. + warn("Non-integer value passed as absolute information. 
" + + "This is not a well-defined condition and will raise " + + "errors in future versions.", DeprecationWarning) + + if isinstance(weekday, integer_types): + self.weekday = weekdays[weekday] + else: + self.weekday = weekday + + yday = 0 + if nlyearday: + yday = nlyearday + elif yearday: + yday = yearday + if yearday > 59: + self.leapdays = -1 + if yday: + ydayidx = [31, 59, 90, 120, 151, 181, 212, + 243, 273, 304, 334, 366] + for idx, ydays in enumerate(ydayidx): + if yday <= ydays: + self.month = idx+1 + if idx == 0: + self.day = yday + else: + self.day = yday-ydayidx[idx-1] + break + else: + raise ValueError("invalid year day (%d)" % yday) + + self._fix() + + def _fix(self): + if abs(self.microseconds) > 999999: + s = _sign(self.microseconds) + div, mod = divmod(self.microseconds * s, 1000000) + self.microseconds = mod * s + self.seconds += div * s + if abs(self.seconds) > 59: + s = _sign(self.seconds) + div, mod = divmod(self.seconds * s, 60) + self.seconds = mod * s + self.minutes += div * s + if abs(self.minutes) > 59: + s = _sign(self.minutes) + div, mod = divmod(self.minutes * s, 60) + self.minutes = mod * s + self.hours += div * s + if abs(self.hours) > 23: + s = _sign(self.hours) + div, mod = divmod(self.hours * s, 24) + self.hours = mod * s + self.days += div * s + if abs(self.months) > 11: + s = _sign(self.months) + div, mod = divmod(self.months * s, 12) + self.months = mod * s + self.years += div * s + if (self.hours or self.minutes or self.seconds or self.microseconds + or self.hour is not None or self.minute is not None or + self.second is not None or self.microsecond is not None): + self._has_time = 1 + else: + self._has_time = 0 + + @property + def weeks(self): + return int(self.days / 7.0) + + @weeks.setter + def weeks(self, value): + self.days = self.days - (self.weeks * 7) + value * 7 + + def _set_months(self, months): + self.months = months + if abs(self.months) > 11: + s = _sign(self.months) + div, mod = divmod(self.months * s, 12) + self.months = mod * s + self.years = div * s + else: + self.years = 0 + + def normalized(self): + """ + Return a version of this object represented entirely using integer + values for the relative attributes. + + >>> relativedelta(days=1.5, hours=2).normalized() + relativedelta(days=1, hours=14) + + :return: + Returns a :class:`dateutil.relativedelta.relativedelta` object. 
+ """ + # Cascade remainders down (rounding each to roughly nearest microsecond) + days = int(self.days) + + hours_f = round(self.hours + 24 * (self.days - days), 11) + hours = int(hours_f) + + minutes_f = round(self.minutes + 60 * (hours_f - hours), 10) + minutes = int(minutes_f) + + seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8) + seconds = int(seconds_f) + + microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds)) + + # Constructor carries overflow back up with call to _fix() + return self.__class__(years=self.years, months=self.months, + days=days, hours=hours, minutes=minutes, + seconds=seconds, microseconds=microseconds, + leapdays=self.leapdays, year=self.year, + month=self.month, day=self.day, + weekday=self.weekday, hour=self.hour, + minute=self.minute, second=self.second, + microsecond=self.microsecond) + + def __add__(self, other): + if isinstance(other, relativedelta): + return self.__class__(years=other.years + self.years, + months=other.months + self.months, + days=other.days + self.days, + hours=other.hours + self.hours, + minutes=other.minutes + self.minutes, + seconds=other.seconds + self.seconds, + microseconds=(other.microseconds + + self.microseconds), + leapdays=other.leapdays or self.leapdays, + year=(other.year if other.year is not None + else self.year), + month=(other.month if other.month is not None + else self.month), + day=(other.day if other.day is not None + else self.day), + weekday=(other.weekday if other.weekday is not None + else self.weekday), + hour=(other.hour if other.hour is not None + else self.hour), + minute=(other.minute if other.minute is not None + else self.minute), + second=(other.second if other.second is not None + else self.second), + microsecond=(other.microsecond if other.microsecond + is not None else + self.microsecond)) + if isinstance(other, datetime.timedelta): + return self.__class__(years=self.years, + months=self.months, + days=self.days + other.days, + hours=self.hours, + minutes=self.minutes, + seconds=self.seconds + other.seconds, + microseconds=self.microseconds + other.microseconds, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + if not isinstance(other, datetime.date): + return NotImplemented + elif self._has_time and not isinstance(other, datetime.datetime): + other = datetime.datetime.fromordinal(other.toordinal()) + year = (self.year or other.year)+self.years + month = self.month or other.month + if self.months: + assert 1 <= abs(self.months) <= 12 + month += self.months + if month > 12: + year += 1 + month -= 12 + elif month < 1: + year -= 1 + month += 12 + day = min(calendar.monthrange(year, month)[1], + self.day or other.day) + repl = {"year": year, "month": month, "day": day} + for attr in ["hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + repl[attr] = value + days = self.days + if self.leapdays and month > 2 and calendar.isleap(year): + days += self.leapdays + ret = (other.replace(**repl) + + datetime.timedelta(days=days, + hours=self.hours, + minutes=self.minutes, + seconds=self.seconds, + microseconds=self.microseconds)) + if self.weekday: + weekday, nth = self.weekday.weekday, self.weekday.n or 1 + jumpdays = (abs(nth) - 1) * 7 + if nth > 0: + jumpdays += (7 - ret.weekday() + weekday) % 7 + else: + jumpdays += (ret.weekday() - weekday) % 7 + jumpdays *= -1 + ret += 
datetime.timedelta(days=jumpdays) + return ret + + def __radd__(self, other): + return self.__add__(other) + + def __rsub__(self, other): + return self.__neg__().__radd__(other) + + def __sub__(self, other): + if not isinstance(other, relativedelta): + return NotImplemented # In case the other object defines __rsub__ + return self.__class__(years=self.years - other.years, + months=self.months - other.months, + days=self.days - other.days, + hours=self.hours - other.hours, + minutes=self.minutes - other.minutes, + seconds=self.seconds - other.seconds, + microseconds=self.microseconds - other.microseconds, + leapdays=self.leapdays or other.leapdays, + year=(self.year if self.year is not None + else other.year), + month=(self.month if self.month is not None else + other.month), + day=(self.day if self.day is not None else + other.day), + weekday=(self.weekday if self.weekday is not None else + other.weekday), + hour=(self.hour if self.hour is not None else + other.hour), + minute=(self.minute if self.minute is not None else + other.minute), + second=(self.second if self.second is not None else + other.second), + microsecond=(self.microsecond if self.microsecond + is not None else + other.microsecond)) + + def __abs__(self): + return self.__class__(years=abs(self.years), + months=abs(self.months), + days=abs(self.days), + hours=abs(self.hours), + minutes=abs(self.minutes), + seconds=abs(self.seconds), + microseconds=abs(self.microseconds), + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + def __neg__(self): + return self.__class__(years=-self.years, + months=-self.months, + days=-self.days, + hours=-self.hours, + minutes=-self.minutes, + seconds=-self.seconds, + microseconds=-self.microseconds, + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + def __bool__(self): + return not (not self.years and + not self.months and + not self.days and + not self.hours and + not self.minutes and + not self.seconds and + not self.microseconds and + not self.leapdays and + self.year is None and + self.month is None and + self.day is None and + self.weekday is None and + self.hour is None and + self.minute is None and + self.second is None and + self.microsecond is None) + # Compatibility with Python 2.x + __nonzero__ = __bool__ + + def __mul__(self, other): + try: + f = float(other) + except TypeError: + return NotImplemented + + return self.__class__(years=int(self.years * f), + months=int(self.months * f), + days=int(self.days * f), + hours=int(self.hours * f), + minutes=int(self.minutes * f), + seconds=int(self.seconds * f), + microseconds=int(self.microseconds * f), + leapdays=self.leapdays, + year=self.year, + month=self.month, + day=self.day, + weekday=self.weekday, + hour=self.hour, + minute=self.minute, + second=self.second, + microsecond=self.microsecond) + + __rmul__ = __mul__ + + def __eq__(self, other): + if not isinstance(other, relativedelta): + return NotImplemented + if self.weekday or other.weekday: + if not self.weekday or not other.weekday: + return False + if self.weekday.weekday != other.weekday.weekday: + return False + n1, n2 = self.weekday.n, other.weekday.n + if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)): + return False + return (self.years == other.years and + 
self.months == other.months and + self.days == other.days and + self.hours == other.hours and + self.minutes == other.minutes and + self.seconds == other.seconds and + self.microseconds == other.microseconds and + self.leapdays == other.leapdays and + self.year == other.year and + self.month == other.month and + self.day == other.day and + self.hour == other.hour and + self.minute == other.minute and + self.second == other.second and + self.microsecond == other.microsecond) + + def __hash__(self): + return hash(( + self.weekday, + self.years, + self.months, + self.days, + self.hours, + self.minutes, + self.seconds, + self.microseconds, + self.leapdays, + self.year, + self.month, + self.day, + self.hour, + self.minute, + self.second, + self.microsecond, + )) + + def __ne__(self, other): + return not self.__eq__(other) + + def __div__(self, other): + try: + reciprocal = 1 / float(other) + except TypeError: + return NotImplemented + + return self.__mul__(reciprocal) + + __truediv__ = __div__ + + def __repr__(self): + l = [] + for attr in ["years", "months", "days", "leapdays", + "hours", "minutes", "seconds", "microseconds"]: + value = getattr(self, attr) + if value: + l.append("{attr}={value:+g}".format(attr=attr, value=value)) + for attr in ["year", "month", "day", "weekday", + "hour", "minute", "second", "microsecond"]: + value = getattr(self, attr) + if value is not None: + l.append("{attr}={value}".format(attr=attr, value=repr(value))) + return "{classname}({attrs})".format(classname=self.__class__.__name__, + attrs=", ".join(l)) + + +def _sign(x): + return int(copysign(1, x)) + +# vim:ts=4:sw=4:et diff --git a/thesisenv/lib/python3.6/site-packages/dateutil/rrule.py b/thesisenv/lib/python3.6/site-packages/dateutil/rrule.py new file mode 100644 index 0000000..8e9c2af --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/dateutil/rrule.py @@ -0,0 +1,1672 @@ +# -*- coding: utf-8 -*- +""" +The rrule module offers a small, complete, and very fast, implementation of +the recurrence rules documented in the +`iCalendar RFC `_, +including support for caching of results. +""" +import itertools +import datetime +import calendar +import re +import sys + +try: + from math import gcd +except ImportError: + from fractions import gcd + +from six import advance_iterator, integer_types +from six.moves import _thread, range +import heapq + +from ._common import weekday as weekdaybase +from .tz import tzutc, tzlocal + +# For warning about deprecation of until and count +from warnings import warn + +__all__ = ["rrule", "rruleset", "rrulestr", + "YEARLY", "MONTHLY", "WEEKLY", "DAILY", + "HOURLY", "MINUTELY", "SECONDLY", + "MO", "TU", "WE", "TH", "FR", "SA", "SU"] + +# Every mask is 7 days longer to handle cross-year weekly periods. 
+M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 + + [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7) +M365MASK = list(M366MASK) +M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32)) +MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +MDAY365MASK = list(MDAY366MASK) +M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0)) +NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) +NMDAY365MASK = list(NMDAY366MASK) +M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366) +M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) +WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55 +del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31] +MDAY365MASK = tuple(MDAY365MASK) +M365MASK = tuple(M365MASK) + +FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECONDLY'] + +(YEARLY, + MONTHLY, + WEEKLY, + DAILY, + HOURLY, + MINUTELY, + SECONDLY) = list(range(7)) + +# Imported on demand. +easter = None +parser = None + + +class weekday(weekdaybase): + """ + This version of weekday does not allow n = 0. + """ + def __init__(self, wkday, n=None): + if n == 0: + raise ValueError("Can't create weekday with n==0") + + super(weekday, self).__init__(wkday, n) + + +MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) + + +def _invalidates_cache(f): + """ + Decorator for rruleset methods which may invalidate the + cached length. + """ + def inner_func(self, *args, **kwargs): + rv = f(self, *args, **kwargs) + self._invalidate_cache() + return rv + + return inner_func + + +class rrulebase(object): + def __init__(self, cache=False): + if cache: + self._cache = [] + self._cache_lock = _thread.allocate_lock() + self._invalidate_cache() + else: + self._cache = None + self._cache_complete = False + self._len = None + + def __iter__(self): + if self._cache_complete: + return iter(self._cache) + elif self._cache is None: + return self._iter() + else: + return self._iter_cached() + + def _invalidate_cache(self): + if self._cache is not None: + self._cache = [] + self._cache_complete = False + self._cache_gen = self._iter() + + if self._cache_lock.locked(): + self._cache_lock.release() + + self._len = None + + def _iter_cached(self): + i = 0 + gen = self._cache_gen + cache = self._cache + acquire = self._cache_lock.acquire + release = self._cache_lock.release + while gen: + if i == len(cache): + acquire() + if self._cache_complete: + break + try: + for j in range(10): + cache.append(advance_iterator(gen)) + except StopIteration: + self._cache_gen = gen = None + self._cache_complete = True + break + release() + yield cache[i] + i += 1 + while i < self._len: + yield cache[i] + i += 1 + + def __getitem__(self, item): + if self._cache_complete: + return self._cache[item] + elif isinstance(item, slice): + if item.step and item.step < 0: + return list(iter(self))[item] + else: + return list(itertools.islice(self, + item.start or 0, + item.stop or sys.maxsize, + item.step or 1)) + elif item >= 0: + gen = iter(self) + try: + for i in range(item+1): + res = advance_iterator(gen) + except StopIteration: + raise IndexError + return res + else: + return list(iter(self))[item] + + def __contains__(self, item): + if self._cache_complete: + return item in self._cache + else: + for i in self: + if i == item: + return True + elif i > item: + return False + return False + + # __len__() introduces a large performance penality. 
+ def count(self): + """ Returns the number of recurrences in this set. It will have go + trough the whole recurrence, if this hasn't been done before. """ + if self._len is None: + for x in self: + pass + return self._len + + def before(self, dt, inc=False): + """ Returns the last recurrence before the given datetime instance. The + inc keyword defines what happens if dt is an occurrence. With + inc=True, if dt itself is an occurrence, it will be returned. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + last = None + if inc: + for i in gen: + if i > dt: + break + last = i + else: + for i in gen: + if i >= dt: + break + last = i + return last + + def after(self, dt, inc=False): + """ Returns the first recurrence after the given datetime instance. The + inc keyword defines what happens if dt is an occurrence. With + inc=True, if dt itself is an occurrence, it will be returned. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + if inc: + for i in gen: + if i >= dt: + return i + else: + for i in gen: + if i > dt: + return i + return None + + def xafter(self, dt, count=None, inc=False): + """ + Generator which yields up to `count` recurrences after the given + datetime instance, equivalent to `after`. + + :param dt: + The datetime at which to start generating recurrences. + + :param count: + The maximum number of recurrences to generate. If `None` (default), + dates are generated until the recurrence rule is exhausted. + + :param inc: + If `dt` is an instance of the rule and `inc` is `True`, it is + included in the output. + + :yields: Yields a sequence of `datetime` objects. + """ + + if self._cache_complete: + gen = self._cache + else: + gen = self + + # Select the comparison function + if inc: + comp = lambda dc, dtc: dc >= dtc + else: + comp = lambda dc, dtc: dc > dtc + + # Generate dates + n = 0 + for d in gen: + if comp(d, dt): + if count is not None: + n += 1 + if n > count: + break + + yield d + + def between(self, after, before, inc=False, count=1): + """ Returns all the occurrences of the rrule between after and before. + The inc keyword defines what happens if after and/or before are + themselves occurrences. With inc=True, they will be included in the + list, if they are found in the recurrence set. """ + if self._cache_complete: + gen = self._cache + else: + gen = self + started = False + l = [] + if inc: + for i in gen: + if i > before: + break + elif not started: + if i >= after: + started = True + l.append(i) + else: + l.append(i) + else: + for i in gen: + if i >= before: + break + elif not started: + if i > after: + started = True + l.append(i) + else: + l.append(i) + return l + + +class rrule(rrulebase): + """ + That's the base of the rrule operation. It accepts all the keywords + defined in the RFC as its constructor parameters (except byday, + which was renamed to byweekday) and more. The constructor prototype is:: + + rrule(freq) + + Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, + or SECONDLY. + + .. note:: + Per RFC section 3.3.10, recurrence instances falling on invalid dates + and times are ignored rather than coerced: + + Recurrence rules may generate recurrence instances with an invalid + date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM + on a day where the local time is moved forward by an hour at 1:00 + AM). Such recurrence instances MUST be ignored and MUST NOT be + counted as part of the recurrence set. 
+ + This can lead to possibly surprising behavior when, for example, the + start date occurs at the end of the month: + + >>> from dateutil.rrule import rrule, MONTHLY + >>> from datetime import datetime + >>> start_date = datetime(2014, 12, 31) + >>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date)) + ... # doctest: +NORMALIZE_WHITESPACE + [datetime.datetime(2014, 12, 31, 0, 0), + datetime.datetime(2015, 1, 31, 0, 0), + datetime.datetime(2015, 3, 31, 0, 0), + datetime.datetime(2015, 5, 31, 0, 0)] + + Additionally, it supports the following keyword arguments: + + :param dtstart: + The recurrence start. Besides being the base for the recurrence, + missing parameters in the final recurrence instances will also be + extracted from this date. If not given, datetime.now() will be used + instead. + :param interval: + The interval between each freq iteration. For example, when using + YEARLY, an interval of 2 means once every two years, but with HOURLY, + it means once every two hours. The default interval is 1. + :param wkst: + The week start day. Must be one of the MO, TU, WE constants, or an + integer, specifying the first day of the week. This will affect + recurrences based on weekly periods. The default week start is got + from calendar.firstweekday(), and may be modified by + calendar.setfirstweekday(). + :param count: + How many occurrences will be generated. + + .. note:: + As of version 2.5.0, the use of the ``until`` keyword together + with the ``count`` keyword is deprecated per RFC-5545 Sec. 3.3.10. + :param until: + If given, this must be a datetime instance, that will specify the + limit of the recurrence. The last recurrence in the rule is the greatest + datetime that is less than or equal to the value specified in the + ``until`` parameter. + + .. note:: + As of version 2.5.0, the use of the ``until`` keyword together + with the ``count`` keyword is deprecated per RFC-5545 Sec. 3.3.10. + :param bysetpos: + If given, it must be either an integer, or a sequence of integers, + positive or negative. Each given integer will specify an occurrence + number, corresponding to the nth occurrence of the rule inside the + frequency period. For example, a bysetpos of -1 if combined with a + MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will + result in the last work day of every month. + :param bymonth: + If given, it must be either an integer, or a sequence of integers, + meaning the months to apply the recurrence to. + :param bymonthday: + If given, it must be either an integer, or a sequence of integers, + meaning the month days to apply the recurrence to. + :param byyearday: + If given, it must be either an integer, or a sequence of integers, + meaning the year days to apply the recurrence to. + :param byeaster: + If given, it must be either an integer, or a sequence of integers, + positive or negative. Each integer will define an offset from the + Easter Sunday. Passing the offset 0 to byeaster will yield the Easter + Sunday itself. This is an extension to the RFC specification. + :param byweekno: + If given, it must be either an integer, or a sequence of integers, + meaning the week numbers to apply the recurrence to. Week numbers + have the meaning described in ISO8601, that is, the first week of + the year is that containing at least four days of the new year. + :param byweekday: + If given, it must be either an integer (0 == MO), a sequence of + integers, one of the weekday constants (MO, TU, etc), or a sequence + of these constants. 
When given, these variables will define the + weekdays where the recurrence will be applied. It's also possible to + use an argument n for the weekday instances, which will mean the nth + occurrence of this weekday in the period. For example, with MONTHLY, + or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the + first friday of the month where the recurrence happens. Notice that in + the RFC documentation, this is specified as BYDAY, but was renamed to + avoid the ambiguity of that keyword. + :param byhour: + If given, it must be either an integer, or a sequence of integers, + meaning the hours to apply the recurrence to. + :param byminute: + If given, it must be either an integer, or a sequence of integers, + meaning the minutes to apply the recurrence to. + :param bysecond: + If given, it must be either an integer, or a sequence of integers, + meaning the seconds to apply the recurrence to. + :param cache: + If given, it must be a boolean value specifying to enable or disable + caching of results. If you will use the same rrule instance multiple + times, enabling caching will improve the performance considerably. + """ + def __init__(self, freq, dtstart=None, + interval=1, wkst=None, count=None, until=None, bysetpos=None, + bymonth=None, bymonthday=None, byyearday=None, byeaster=None, + byweekno=None, byweekday=None, + byhour=None, byminute=None, bysecond=None, + cache=False): + super(rrule, self).__init__(cache) + global easter + if not dtstart: + if until and until.tzinfo: + dtstart = datetime.datetime.now(tz=until.tzinfo).replace(microsecond=0) + else: + dtstart = datetime.datetime.now().replace(microsecond=0) + elif not isinstance(dtstart, datetime.datetime): + dtstart = datetime.datetime.fromordinal(dtstart.toordinal()) + else: + dtstart = dtstart.replace(microsecond=0) + self._dtstart = dtstart + self._tzinfo = dtstart.tzinfo + self._freq = freq + self._interval = interval + self._count = count + + # Cache the original byxxx rules, if they are provided, as the _byxxx + # attributes do not necessarily map to the inputs, and this can be + # a problem in generating the strings. Only store things if they've + # been supplied (the string retrieval will just use .get()) + self._original_rule = {} + + if until and not isinstance(until, datetime.datetime): + until = datetime.datetime.fromordinal(until.toordinal()) + self._until = until + + if self._dtstart and self._until: + if (self._dtstart.tzinfo is not None) != (self._until.tzinfo is not None): + # According to RFC5545 Section 3.3.10: + # https://tools.ietf.org/html/rfc5545#section-3.3.10 + # + # > If the "DTSTART" property is specified as a date with UTC + # > time or a date with local time and time zone reference, + # > then the UNTIL rule part MUST be specified as a date with + # > UTC time. + raise ValueError( + 'RRULE UNTIL values must be specified in UTC when DTSTART ' + 'is timezone-aware' + ) + + if count is not None and until: + warn("Using both 'count' and 'until' is inconsistent with RFC 5545" + " and has been deprecated in dateutil. 
Future versions will " + "raise an error.", DeprecationWarning) + + if wkst is None: + self._wkst = calendar.firstweekday() + elif isinstance(wkst, integer_types): + self._wkst = wkst + else: + self._wkst = wkst.weekday + + if bysetpos is None: + self._bysetpos = None + elif isinstance(bysetpos, integer_types): + if bysetpos == 0 or not (-366 <= bysetpos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + self._bysetpos = (bysetpos,) + else: + self._bysetpos = tuple(bysetpos) + for pos in self._bysetpos: + if pos == 0 or not (-366 <= pos <= 366): + raise ValueError("bysetpos must be between 1 and 366, " + "or between -366 and -1") + + if self._bysetpos: + self._original_rule['bysetpos'] = self._bysetpos + + if (byweekno is None and byyearday is None and bymonthday is None and + byweekday is None and byeaster is None): + if freq == YEARLY: + if bymonth is None: + bymonth = dtstart.month + self._original_rule['bymonth'] = None + bymonthday = dtstart.day + self._original_rule['bymonthday'] = None + elif freq == MONTHLY: + bymonthday = dtstart.day + self._original_rule['bymonthday'] = None + elif freq == WEEKLY: + byweekday = dtstart.weekday() + self._original_rule['byweekday'] = None + + # bymonth + if bymonth is None: + self._bymonth = None + else: + if isinstance(bymonth, integer_types): + bymonth = (bymonth,) + + self._bymonth = tuple(sorted(set(bymonth))) + + if 'bymonth' not in self._original_rule: + self._original_rule['bymonth'] = self._bymonth + + # byyearday + if byyearday is None: + self._byyearday = None + else: + if isinstance(byyearday, integer_types): + byyearday = (byyearday,) + + self._byyearday = tuple(sorted(set(byyearday))) + self._original_rule['byyearday'] = self._byyearday + + # byeaster + if byeaster is not None: + if not easter: + from dateutil import easter + if isinstance(byeaster, integer_types): + self._byeaster = (byeaster,) + else: + self._byeaster = tuple(sorted(byeaster)) + + self._original_rule['byeaster'] = self._byeaster + else: + self._byeaster = None + + # bymonthday + if bymonthday is None: + self._bymonthday = () + self._bynmonthday = () + else: + if isinstance(bymonthday, integer_types): + bymonthday = (bymonthday,) + + bymonthday = set(bymonthday) # Ensure it's unique + + self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0)) + self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0)) + + # Storing positive numbers first, then negative numbers + if 'bymonthday' not in self._original_rule: + self._original_rule['bymonthday'] = tuple( + itertools.chain(self._bymonthday, self._bynmonthday)) + + # byweekno + if byweekno is None: + self._byweekno = None + else: + if isinstance(byweekno, integer_types): + byweekno = (byweekno,) + + self._byweekno = tuple(sorted(set(byweekno))) + + self._original_rule['byweekno'] = self._byweekno + + # byweekday / bynweekday + if byweekday is None: + self._byweekday = None + self._bynweekday = None + else: + # If it's one of the valid non-sequence types, convert to a + # single-element sequence before the iterator that builds the + # byweekday set. 
+ if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"): + byweekday = (byweekday,) + + self._byweekday = set() + self._bynweekday = set() + for wday in byweekday: + if isinstance(wday, integer_types): + self._byweekday.add(wday) + elif not wday.n or freq > MONTHLY: + self._byweekday.add(wday.weekday) + else: + self._bynweekday.add((wday.weekday, wday.n)) + + if not self._byweekday: + self._byweekday = None + elif not self._bynweekday: + self._bynweekday = None + + if self._byweekday is not None: + self._byweekday = tuple(sorted(self._byweekday)) + orig_byweekday = [weekday(x) for x in self._byweekday] + else: + orig_byweekday = () + + if self._bynweekday is not None: + self._bynweekday = tuple(sorted(self._bynweekday)) + orig_bynweekday = [weekday(*x) for x in self._bynweekday] + else: + orig_bynweekday = () + + if 'byweekday' not in self._original_rule: + self._original_rule['byweekday'] = tuple(itertools.chain( + orig_byweekday, orig_bynweekday)) + + # byhour + if byhour is None: + if freq < HOURLY: + self._byhour = {dtstart.hour} + else: + self._byhour = None + else: + if isinstance(byhour, integer_types): + byhour = (byhour,) + + if freq == HOURLY: + self._byhour = self.__construct_byset(start=dtstart.hour, + byxxx=byhour, + base=24) + else: + self._byhour = set(byhour) + + self._byhour = tuple(sorted(self._byhour)) + self._original_rule['byhour'] = self._byhour + + # byminute + if byminute is None: + if freq < MINUTELY: + self._byminute = {dtstart.minute} + else: + self._byminute = None + else: + if isinstance(byminute, integer_types): + byminute = (byminute,) + + if freq == MINUTELY: + self._byminute = self.__construct_byset(start=dtstart.minute, + byxxx=byminute, + base=60) + else: + self._byminute = set(byminute) + + self._byminute = tuple(sorted(self._byminute)) + self._original_rule['byminute'] = self._byminute + + # bysecond + if bysecond is None: + if freq < SECONDLY: + self._bysecond = ((dtstart.second,)) + else: + self._bysecond = None + else: + if isinstance(bysecond, integer_types): + bysecond = (bysecond,) + + self._bysecond = set(bysecond) + + if freq == SECONDLY: + self._bysecond = self.__construct_byset(start=dtstart.second, + byxxx=bysecond, + base=60) + else: + self._bysecond = set(bysecond) + + self._bysecond = tuple(sorted(self._bysecond)) + self._original_rule['bysecond'] = self._bysecond + + if self._freq >= HOURLY: + self._timeset = None + else: + self._timeset = [] + for hour in self._byhour: + for minute in self._byminute: + for second in self._bysecond: + self._timeset.append( + datetime.time(hour, minute, second, + tzinfo=self._tzinfo)) + self._timeset.sort() + self._timeset = tuple(self._timeset) + + def __str__(self): + """ + Output a string that would generate this RRULE if passed to rrulestr. + This is mostly compatible with RFC5545, except for the + dateutil-specific extension BYEASTER. 
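# A small round-trip sketch for the RFC 5545 serialization described above, assuming
# rrulestr from the same module; the serialized form shown is approximate:
from datetime import datetime
from dateutil.rrule import rrule, rrulestr, WEEKLY

r = rrule(freq=WEEKLY, count=4, dtstart=datetime(2018, 10, 22))
s = str(r)        # e.g. "DTSTART:20181022T000000\nRRULE:FREQ=WEEKLY;COUNT=4"
r2 = rrulestr(s)  # parse the serialized rule back
assert list(r) == list(r2)  # both enumerate the same four weekly occurrences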
+ """ + + output = [] + h, m, s = [None] * 3 + if self._dtstart: + output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S')) + h, m, s = self._dtstart.timetuple()[3:6] + + parts = ['FREQ=' + FREQNAMES[self._freq]] + if self._interval != 1: + parts.append('INTERVAL=' + str(self._interval)) + + if self._wkst: + parts.append('WKST=' + repr(weekday(self._wkst))[0:2]) + + if self._count is not None: + parts.append('COUNT=' + str(self._count)) + + if self._until: + parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S')) + + if self._original_rule.get('byweekday') is not None: + # The str() method on weekday objects doesn't generate + # RFC5545-compliant strings, so we should modify that. + original_rule = dict(self._original_rule) + wday_strings = [] + for wday in original_rule['byweekday']: + if wday.n: + wday_strings.append('{n:+d}{wday}'.format( + n=wday.n, + wday=repr(wday)[0:2])) + else: + wday_strings.append(repr(wday)) + + original_rule['byweekday'] = wday_strings + else: + original_rule = self._original_rule + + partfmt = '{name}={vals}' + for name, key in [('BYSETPOS', 'bysetpos'), + ('BYMONTH', 'bymonth'), + ('BYMONTHDAY', 'bymonthday'), + ('BYYEARDAY', 'byyearday'), + ('BYWEEKNO', 'byweekno'), + ('BYDAY', 'byweekday'), + ('BYHOUR', 'byhour'), + ('BYMINUTE', 'byminute'), + ('BYSECOND', 'bysecond'), + ('BYEASTER', 'byeaster')]: + value = original_rule.get(key) + if value: + parts.append(partfmt.format(name=name, vals=(','.join(str(v) + for v in value)))) + + output.append('RRULE:' + ';'.join(parts)) + return '\n'.join(output) + + def replace(self, **kwargs): + """Return new rrule with same attributes except for those attributes given new + values by whichever keyword arguments are specified.""" + new_kwargs = {"interval": self._interval, + "count": self._count, + "dtstart": self._dtstart, + "freq": self._freq, + "until": self._until, + "wkst": self._wkst, + "cache": False if self._cache is None else True } + new_kwargs.update(self._original_rule) + new_kwargs.update(kwargs) + return rrule(**new_kwargs) + + def _iter(self): + year, month, day, hour, minute, second, weekday, yearday, _ = \ + self._dtstart.timetuple() + + # Some local variables to speed things up a bit + freq = self._freq + interval = self._interval + wkst = self._wkst + until = self._until + bymonth = self._bymonth + byweekno = self._byweekno + byyearday = self._byyearday + byweekday = self._byweekday + byeaster = self._byeaster + bymonthday = self._bymonthday + bynmonthday = self._bynmonthday + bysetpos = self._bysetpos + byhour = self._byhour + byminute = self._byminute + bysecond = self._bysecond + + ii = _iterinfo(self) + ii.rebuild(year, month) + + getdayset = {YEARLY: ii.ydayset, + MONTHLY: ii.mdayset, + WEEKLY: ii.wdayset, + DAILY: ii.ddayset, + HOURLY: ii.ddayset, + MINUTELY: ii.ddayset, + SECONDLY: ii.ddayset}[freq] + + if freq < HOURLY: + timeset = self._timeset + else: + gettimeset = {HOURLY: ii.htimeset, + MINUTELY: ii.mtimeset, + SECONDLY: ii.stimeset}[freq] + if ((freq >= HOURLY and + self._byhour and hour not in self._byhour) or + (freq >= MINUTELY and + self._byminute and minute not in self._byminute) or + (freq >= SECONDLY and + self._bysecond and second not in self._bysecond)): + timeset = () + else: + timeset = gettimeset(hour, minute, second) + + total = 0 + count = self._count + while True: + # Get dayset with the right frequency + dayset, start, end = getdayset(year, month, day) + + # Do the "hard" work ;-) + filtered = False + for i in dayset[start:end]: + if ((bymonth and ii.mmask[i] not in 
bymonth) or + (byweekno and not ii.wnomask[i]) or + (byweekday and ii.wdaymask[i] not in byweekday) or + (ii.nwdaymask and not ii.nwdaymask[i]) or + (byeaster and not ii.eastermask[i]) or + ((bymonthday or bynmonthday) and + ii.mdaymask[i] not in bymonthday and + ii.nmdaymask[i] not in bynmonthday) or + (byyearday and + ((i < ii.yearlen and i+1 not in byyearday and + -ii.yearlen+i not in byyearday) or + (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and + -ii.nextyearlen+i-ii.yearlen not in byyearday)))): + dayset[i] = None + filtered = True + + # Output results + if bysetpos and timeset: + poslist = [] + for pos in bysetpos: + if pos < 0: + daypos, timepos = divmod(pos, len(timeset)) + else: + daypos, timepos = divmod(pos-1, len(timeset)) + try: + i = [x for x in dayset[start:end] + if x is not None][daypos] + time = timeset[timepos] + except IndexError: + pass + else: + date = datetime.date.fromordinal(ii.yearordinal+i) + res = datetime.datetime.combine(date, time) + if res not in poslist: + poslist.append(res) + poslist.sort() + for res in poslist: + if until and res > until: + self._len = total + return + elif res >= self._dtstart: + if count is not None: + count -= 1 + if count < 0: + self._len = total + return + total += 1 + yield res + else: + for i in dayset[start:end]: + if i is not None: + date = datetime.date.fromordinal(ii.yearordinal + i) + for time in timeset: + res = datetime.datetime.combine(date, time) + if until and res > until: + self._len = total + return + elif res >= self._dtstart: + if count is not None: + count -= 1 + if count < 0: + self._len = total + return + + total += 1 + yield res + + # Handle frequency and interval + fixday = False + if freq == YEARLY: + year += interval + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == MONTHLY: + month += interval + if month > 12: + div, mod = divmod(month, 12) + month = mod + year += div + if month == 0: + month = 12 + year -= 1 + if year > datetime.MAXYEAR: + self._len = total + return + ii.rebuild(year, month) + elif freq == WEEKLY: + if wkst > weekday: + day += -(weekday+1+(6-wkst))+self._interval*7 + else: + day += -(weekday-wkst)+self._interval*7 + weekday = wkst + fixday = True + elif freq == DAILY: + day += interval + fixday = True + elif freq == HOURLY: + if filtered: + # Jump to one iteration before next day + hour += ((23-hour)//interval)*interval + + if byhour: + ndays, hour = self.__mod_distance(value=hour, + byxxx=self._byhour, + base=24) + else: + ndays, hour = divmod(hour+interval, 24) + + if ndays: + day += ndays + fixday = True + + timeset = gettimeset(hour, minute, second) + elif freq == MINUTELY: + if filtered: + # Jump to one iteration before next day + minute += ((1439-(hour*60+minute))//interval)*interval + + valid = False + rep_rate = (24*60) + for j in range(rep_rate // gcd(interval, rep_rate)): + if byminute: + nhours, minute = \ + self.__mod_distance(value=minute, + byxxx=self._byminute, + base=60) + else: + nhours, minute = divmod(minute+interval, 60) + + div, hour = divmod(hour+nhours, 24) + if div: + day += div + fixday = True + filtered = False + + if not byhour or hour in byhour: + valid = True + break + + if not valid: + raise ValueError('Invalid combination of interval and ' + + 'byhour resulting in empty rule.') + + timeset = gettimeset(hour, minute, second) + elif freq == SECONDLY: + if filtered: + # Jump to one iteration before next day + second += (((86399 - (hour * 3600 + minute * 60 + second)) + // interval) * interval) + + 
rep_rate = (24 * 3600) + valid = False + for j in range(0, rep_rate // gcd(interval, rep_rate)): + if bysecond: + nminutes, second = \ + self.__mod_distance(value=second, + byxxx=self._bysecond, + base=60) + else: + nminutes, second = divmod(second+interval, 60) + + div, minute = divmod(minute+nminutes, 60) + if div: + hour += div + div, hour = divmod(hour, 24) + if div: + day += div + fixday = True + + if ((not byhour or hour in byhour) and + (not byminute or minute in byminute) and + (not bysecond or second in bysecond)): + valid = True + break + + if not valid: + raise ValueError('Invalid combination of interval, ' + + 'byhour and byminute resulting in empty' + + ' rule.') + + timeset = gettimeset(hour, minute, second) + + if fixday and day > 28: + daysinmonth = calendar.monthrange(year, month)[1] + if day > daysinmonth: + while day > daysinmonth: + day -= daysinmonth + month += 1 + if month == 13: + month = 1 + year += 1 + if year > datetime.MAXYEAR: + self._len = total + return + daysinmonth = calendar.monthrange(year, month)[1] + ii.rebuild(year, month) + + def __construct_byset(self, start, byxxx, base): + """ + If a `BYXXX` sequence is passed to the constructor at the same level as + `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some + specifications which cannot be reached given some starting conditions. + + This occurs whenever the interval is not coprime with the base of a + given unit and the difference between the starting position and the + ending position is not coprime with the greatest common denominator + between the interval and the base. For example, with a FREQ of hourly + starting at 17:00 and an interval of 4, the only valid values for + BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not + coprime. + + :param start: + Specifies the starting position. + :param byxxx: + An iterable containing the list of allowed values. + :param base: + The largest allowable value for the specified frequency (e.g. + 24 hours, 60 minutes). + + This does not preserve the type of the iterable, returning a set, since + the values should be unique and the order is irrelevant, this will + speed up later lookups. + + In the event of an empty set, raises a :exception:`ValueError`, as this + results in an empty rrule. + """ + + cset = set() + + # Support a single byxxx value. + if isinstance(byxxx, integer_types): + byxxx = (byxxx, ) + + for num in byxxx: + i_gcd = gcd(self._interval, base) + # Use divmod rather than % because we need to wrap negative nums. + if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0: + cset.add(num) + + if len(cset) == 0: + raise ValueError("Invalid rrule byxxx generates an empty set.") + + return cset + + def __mod_distance(self, value, byxxx, base): + """ + Calculates the next value in a sequence where the `FREQ` parameter is + specified along with a `BYXXX` parameter at the same "level" + (e.g. `HOURLY` specified with `BYHOUR`). + + :param value: + The old value of the component. + :param byxxx: + The `BYXXX` set, which should have been generated by + `rrule._construct_byset`, or something else which checks that a + valid rule is present. + :param base: + The largest allowable value for the specified frequency (e.g. + 24 hours, 60 minutes). + + If a valid value is not found after `base` iterations (the maximum + number before the sequence would start to repeat), this raises a + :exception:`ValueError`, as no valid values were found. 
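The constraint described above can be seen directly when a rule is constructed. The following is a minimal illustrative sketch (the start date is an arbitrary assumption, not taken from the patch): with FREQ=HOURLY, INTERVAL=4 and a start hour of 17, only hours reachable from 17 in steps of gcd(4, 24) = 4 are usable BYHOUR values.

from datetime import datetime
from dateutil.rrule import rrule, HOURLY

start = datetime(2018, 10, 22, 17, 0)   # arbitrary example start at 17:00

# Hours 1, 9 and 17 are all reachable from 17 in 4-hour steps, so this
# yields Oct 22 17:00, Oct 23 01:00 and Oct 23 09:00.
rule = rrule(HOURLY, interval=4, byhour=(1, 9, 17), dtstart=start, count=3)
print(list(rule))

# Hour 2 is not reachable, so the check above rejects it at construction time.
try:
    rrule(HOURLY, interval=4, byhour=(2,), dtstart=start)
except ValueError as exc:
    print(exc)   # "Invalid rrule byxxx generates an empty set."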
+ + This returns a tuple of `divmod(n*interval, base)`, where `n` is the + smallest number of `interval` repetitions until the next specified + value in `byxxx` is found. + """ + accumulator = 0 + for ii in range(1, base + 1): + # Using divmod() over % to account for negative intervals + div, value = divmod(value + self._interval, base) + accumulator += div + if value in byxxx: + return (accumulator, value) + + +class _iterinfo(object): + __slots__ = ["rrule", "lastyear", "lastmonth", + "yearlen", "nextyearlen", "yearordinal", "yearweekday", + "mmask", "mrange", "mdaymask", "nmdaymask", + "wdaymask", "wnomask", "nwdaymask", "eastermask"] + + def __init__(self, rrule): + for attr in self.__slots__: + setattr(self, attr, None) + self.rrule = rrule + + def rebuild(self, year, month): + # Every mask is 7 days longer to handle cross-year weekly periods. + rr = self.rrule + if year != self.lastyear: + self.yearlen = 365 + calendar.isleap(year) + self.nextyearlen = 365 + calendar.isleap(year + 1) + firstyday = datetime.date(year, 1, 1) + self.yearordinal = firstyday.toordinal() + self.yearweekday = firstyday.weekday() + + wday = datetime.date(year, 1, 1).weekday() + if self.yearlen == 365: + self.mmask = M365MASK + self.mdaymask = MDAY365MASK + self.nmdaymask = NMDAY365MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M365RANGE + else: + self.mmask = M366MASK + self.mdaymask = MDAY366MASK + self.nmdaymask = NMDAY366MASK + self.wdaymask = WDAYMASK[wday:] + self.mrange = M366RANGE + + if not rr._byweekno: + self.wnomask = None + else: + self.wnomask = [0]*(self.yearlen+7) + # no1wkst = firstwkst = self.wdaymask.index(rr._wkst) + no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7 + if no1wkst >= 4: + no1wkst = 0 + # Number of days in the year, plus the days we got + # from last year. + wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7 + else: + # Number of days in the year, minus the days we + # left in last year. + wyearlen = self.yearlen-no1wkst + div, mod = divmod(wyearlen, 7) + numweeks = div+mod//4 + for n in rr._byweekno: + if n < 0: + n += numweeks+1 + if not (0 < n <= numweeks): + continue + if n > 1: + i = no1wkst+(n-1)*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + else: + i = no1wkst + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if 1 in rr._byweekno: + # Check week number 1 of next year as well + # TODO: Check -numweeks for next year. + i = no1wkst+numweeks*7 + if no1wkst != firstwkst: + i -= 7-firstwkst + if i < self.yearlen: + # If week starts in next year, we + # don't care about it. + for j in range(7): + self.wnomask[i] = 1 + i += 1 + if self.wdaymask[i] == rr._wkst: + break + if no1wkst: + # Check last week number of last year as + # well. If no1wkst is 0, either the year + # started on week start, or week number 1 + # got days from last year, so there are no + # days from last year's last week number in + # this year. 
+ if -1 not in rr._byweekno: + lyearweekday = datetime.date(year-1, 1, 1).weekday() + lno1wkst = (7-lyearweekday+rr._wkst) % 7 + lyearlen = 365+calendar.isleap(year-1) + if lno1wkst >= 4: + lno1wkst = 0 + lnumweeks = 52+(lyearlen + + (lyearweekday-rr._wkst) % 7) % 7//4 + else: + lnumweeks = 52+(self.yearlen-no1wkst) % 7//4 + else: + lnumweeks = -1 + if lnumweeks in rr._byweekno: + for i in range(no1wkst): + self.wnomask[i] = 1 + + if (rr._bynweekday and (month != self.lastmonth or + year != self.lastyear)): + ranges = [] + if rr._freq == YEARLY: + if rr._bymonth: + for month in rr._bymonth: + ranges.append(self.mrange[month-1:month+1]) + else: + ranges = [(0, self.yearlen)] + elif rr._freq == MONTHLY: + ranges = [self.mrange[month-1:month+1]] + if ranges: + # Weekly frequency won't get here, so we may not + # care about cross-year weekly periods. + self.nwdaymask = [0]*self.yearlen + for first, last in ranges: + last -= 1 + for wday, n in rr._bynweekday: + if n < 0: + i = last+(n+1)*7 + i -= (self.wdaymask[i]-wday) % 7 + else: + i = first+(n-1)*7 + i += (7-self.wdaymask[i]+wday) % 7 + if first <= i <= last: + self.nwdaymask[i] = 1 + + if rr._byeaster: + self.eastermask = [0]*(self.yearlen+7) + eyday = easter.easter(year).toordinal()-self.yearordinal + for offset in rr._byeaster: + self.eastermask[eyday+offset] = 1 + + self.lastyear = year + self.lastmonth = month + + def ydayset(self, year, month, day): + return list(range(self.yearlen)), 0, self.yearlen + + def mdayset(self, year, month, day): + dset = [None]*self.yearlen + start, end = self.mrange[month-1:month+1] + for i in range(start, end): + dset[i] = i + return dset, start, end + + def wdayset(self, year, month, day): + # We need to handle cross-year weeks here. + dset = [None]*(self.yearlen+7) + i = datetime.date(year, month, day).toordinal()-self.yearordinal + start = i + for j in range(7): + dset[i] = i + i += 1 + # if (not (0 <= i < self.yearlen) or + # self.wdaymask[i] == self.rrule._wkst): + # This will cross the year boundary, if necessary. + if self.wdaymask[i] == self.rrule._wkst: + break + return dset, start, i + + def ddayset(self, year, month, day): + dset = [None] * self.yearlen + i = datetime.date(year, month, day).toordinal() - self.yearordinal + dset[i] = i + return dset, i, i + 1 + + def htimeset(self, hour, minute, second): + tset = [] + rr = self.rrule + for minute in rr._byminute: + for second in rr._bysecond: + tset.append(datetime.time(hour, minute, second, + tzinfo=rr._tzinfo)) + tset.sort() + return tset + + def mtimeset(self, hour, minute, second): + tset = [] + rr = self.rrule + for second in rr._bysecond: + tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo)) + tset.sort() + return tset + + def stimeset(self, hour, minute, second): + return (datetime.time(hour, minute, second, + tzinfo=self.rrule._tzinfo),) + + +class rruleset(rrulebase): + """ The rruleset type allows more complex recurrence setups, mixing + multiple rules, dates, exclusion rules, and exclusion dates. The type + constructor takes the following keyword arguments: + + :param cache: If True, caching of results will be enabled, improving + performance of multiple queries considerably. 
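A minimal usage sketch of the class described above (the dates are arbitrary and chosen only for illustration): an inclusion rule is combined with a single excluded date.

from datetime import datetime
from dateutil.rrule import rrule, rruleset, DAILY

rset = rruleset()
rset.rrule(rrule(DAILY, count=5, dtstart=datetime(2018, 10, 22, 9, 0)))
rset.exdate(datetime(2018, 10, 24, 9, 0))   # drop the third occurrence
print(list(rset))                           # four datetimes remain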
""" + + class _genitem(object): + def __init__(self, genlist, gen): + try: + self.dt = advance_iterator(gen) + genlist.append(self) + except StopIteration: + pass + self.genlist = genlist + self.gen = gen + + def __next__(self): + try: + self.dt = advance_iterator(self.gen) + except StopIteration: + if self.genlist[0] is self: + heapq.heappop(self.genlist) + else: + self.genlist.remove(self) + heapq.heapify(self.genlist) + + next = __next__ + + def __lt__(self, other): + return self.dt < other.dt + + def __gt__(self, other): + return self.dt > other.dt + + def __eq__(self, other): + return self.dt == other.dt + + def __ne__(self, other): + return self.dt != other.dt + + def __init__(self, cache=False): + super(rruleset, self).__init__(cache) + self._rrule = [] + self._rdate = [] + self._exrule = [] + self._exdate = [] + + @_invalidates_cache + def rrule(self, rrule): + """ Include the given :py:class:`rrule` instance in the recurrence set + generation. """ + self._rrule.append(rrule) + + @_invalidates_cache + def rdate(self, rdate): + """ Include the given :py:class:`datetime` instance in the recurrence + set generation. """ + self._rdate.append(rdate) + + @_invalidates_cache + def exrule(self, exrule): + """ Include the given rrule instance in the recurrence set exclusion + list. Dates which are part of the given recurrence rules will not + be generated, even if some inclusive rrule or rdate matches them. + """ + self._exrule.append(exrule) + + @_invalidates_cache + def exdate(self, exdate): + """ Include the given datetime instance in the recurrence set + exclusion list. Dates included that way will not be generated, + even if some inclusive rrule or rdate matches them. """ + self._exdate.append(exdate) + + def _iter(self): + rlist = [] + self._rdate.sort() + self._genitem(rlist, iter(self._rdate)) + for gen in [iter(x) for x in self._rrule]: + self._genitem(rlist, gen) + exlist = [] + self._exdate.sort() + self._genitem(exlist, iter(self._exdate)) + for gen in [iter(x) for x in self._exrule]: + self._genitem(exlist, gen) + lastdt = None + total = 0 + heapq.heapify(rlist) + heapq.heapify(exlist) + while rlist: + ritem = rlist[0] + if not lastdt or lastdt != ritem.dt: + while exlist and exlist[0] < ritem: + exitem = exlist[0] + advance_iterator(exitem) + if exlist and exlist[0] is exitem: + heapq.heapreplace(exlist, exitem) + if not exlist or ritem != exlist[0]: + total += 1 + yield ritem.dt + lastdt = ritem.dt + advance_iterator(ritem) + if rlist and rlist[0] is ritem: + heapq.heapreplace(rlist, ritem) + self._len = total + + +class _rrulestr(object): + + _freq_map = {"YEARLY": YEARLY, + "MONTHLY": MONTHLY, + "WEEKLY": WEEKLY, + "DAILY": DAILY, + "HOURLY": HOURLY, + "MINUTELY": MINUTELY, + "SECONDLY": SECONDLY} + + _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3, + "FR": 4, "SA": 5, "SU": 6} + + def _handle_int(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = int(value) + + def _handle_int_list(self, rrkwargs, name, value, **kwargs): + rrkwargs[name.lower()] = [int(x) for x in value.split(',')] + + _handle_INTERVAL = _handle_int + _handle_COUNT = _handle_int + _handle_BYSETPOS = _handle_int_list + _handle_BYMONTH = _handle_int_list + _handle_BYMONTHDAY = _handle_int_list + _handle_BYYEARDAY = _handle_int_list + _handle_BYEASTER = _handle_int_list + _handle_BYWEEKNO = _handle_int_list + _handle_BYHOUR = _handle_int_list + _handle_BYMINUTE = _handle_int_list + _handle_BYSECOND = _handle_int_list + + def _handle_FREQ(self, rrkwargs, name, value, **kwargs): + 
rrkwargs["freq"] = self._freq_map[value] + + def _handle_UNTIL(self, rrkwargs, name, value, **kwargs): + global parser + if not parser: + from dateutil import parser + try: + rrkwargs["until"] = parser.parse(value, + ignoretz=kwargs.get("ignoretz"), + tzinfos=kwargs.get("tzinfos")) + except ValueError: + raise ValueError("invalid until date") + + def _handle_WKST(self, rrkwargs, name, value, **kwargs): + rrkwargs["wkst"] = self._weekday_map[value] + + def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs): + """ + Two ways to specify this: +1MO or MO(+1) + """ + l = [] + for wday in value.split(','): + if '(' in wday: + # If it's of the form TH(+1), etc. + splt = wday.split('(') + w = splt[0] + n = int(splt[1][:-1]) + elif len(wday): + # If it's of the form +1MO + for i in range(len(wday)): + if wday[i] not in '+-0123456789': + break + n = wday[:i] or None + w = wday[i:] + if n: + n = int(n) + else: + raise ValueError("Invalid (empty) BYDAY specification.") + + l.append(weekdays[self._weekday_map[w]](n)) + rrkwargs["byweekday"] = l + + _handle_BYDAY = _handle_BYWEEKDAY + + def _parse_rfc_rrule(self, line, + dtstart=None, + cache=False, + ignoretz=False, + tzinfos=None): + if line.find(':') != -1: + name, value = line.split(':') + if name != "RRULE": + raise ValueError("unknown parameter name") + else: + value = line + rrkwargs = {} + for pair in value.split(';'): + name, value = pair.split('=') + name = name.upper() + value = value.upper() + try: + getattr(self, "_handle_"+name)(rrkwargs, name, value, + ignoretz=ignoretz, + tzinfos=tzinfos) + except AttributeError: + raise ValueError("unknown parameter '%s'" % name) + except (KeyError, ValueError): + raise ValueError("invalid '%s': %s" % (name, value)) + return rrule(dtstart=dtstart, cache=cache, **rrkwargs) + + def _parse_rfc(self, s, + dtstart=None, + cache=False, + unfold=False, + forceset=False, + compatible=False, + ignoretz=False, + tzids=None, + tzinfos=None): + global parser + if compatible: + forceset = True + unfold = True + + TZID_NAMES = dict(map( + lambda x: (x.upper(), x), + re.findall('TZID=(?P[^:]+):', s) + )) + s = s.upper() + if not s.strip(): + raise ValueError("empty string") + if unfold: + lines = s.splitlines() + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] == " ": + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + else: + lines = s.split() + if (not forceset and len(lines) == 1 and (s.find(':') == -1 or + s.startswith('RRULE:'))): + return self._parse_rfc_rrule(lines[0], cache=cache, + dtstart=dtstart, ignoretz=ignoretz, + tzinfos=tzinfos) + else: + rrulevals = [] + rdatevals = [] + exrulevals = [] + exdatevals = [] + for line in lines: + if not line: + continue + if line.find(':') == -1: + name = "RRULE" + value = line + else: + name, value = line.split(':', 1) + parms = name.split(';') + if not parms: + raise ValueError("empty property name") + name = parms[0] + parms = parms[1:] + if name == "RRULE": + for parm in parms: + raise ValueError("unsupported RRULE parm: "+parm) + rrulevals.append(value) + elif name == "RDATE": + for parm in parms: + if parm != "VALUE=DATE-TIME": + raise ValueError("unsupported RDATE parm: "+parm) + rdatevals.append(value) + elif name == "EXRULE": + for parm in parms: + raise ValueError("unsupported EXRULE parm: "+parm) + exrulevals.append(value) + elif name == "EXDATE": + for parm in parms: + if parm != "VALUE=DATE-TIME": + raise ValueError("unsupported EXDATE parm: "+parm) + exdatevals.append(value) + elif 
name == "DTSTART": + # RFC 5445 3.8.2.4: The VALUE parameter is optional, but + # may be found only once. + value_found = False + TZID = None + valid_values = {"VALUE=DATE-TIME", "VALUE=DATE"} + for parm in parms: + if parm.startswith("TZID="): + try: + tzkey = TZID_NAMES[parm.split('TZID=')[-1]] + except KeyError: + continue + if tzids is None: + from . import tz + tzlookup = tz.gettz + elif callable(tzids): + tzlookup = tzids + else: + tzlookup = getattr(tzids, 'get', None) + if tzlookup is None: + msg = ('tzids must be a callable, ' + + 'mapping, or None, ' + + 'not %s' % tzids) + raise ValueError(msg) + + TZID = tzlookup(tzkey) + continue + if parm not in valid_values: + raise ValueError("unsupported DTSTART parm: "+parm) + else: + if value_found: + msg = ("Duplicate value parameter found in " + + "DTSTART: " + parm) + raise ValueError(msg) + value_found = True + if not parser: + from dateutil import parser + dtstart = parser.parse(value, ignoretz=ignoretz, + tzinfos=tzinfos) + if TZID is not None: + if dtstart.tzinfo is None: + dtstart = dtstart.replace(tzinfo=TZID) + else: + raise ValueError('DTSTART specifies multiple timezones') + else: + raise ValueError("unsupported property: "+name) + if (forceset or len(rrulevals) > 1 or rdatevals + or exrulevals or exdatevals): + if not parser and (rdatevals or exdatevals): + from dateutil import parser + rset = rruleset(cache=cache) + for value in rrulevals: + rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in rdatevals: + for datestr in value.split(','): + rset.rdate(parser.parse(datestr, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exrulevals: + rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart, + ignoretz=ignoretz, + tzinfos=tzinfos)) + for value in exdatevals: + for datestr in value.split(','): + rset.exdate(parser.parse(datestr, + ignoretz=ignoretz, + tzinfos=tzinfos)) + if compatible and dtstart: + rset.rdate(dtstart) + return rset + else: + return self._parse_rfc_rrule(rrulevals[0], + dtstart=dtstart, + cache=cache, + ignoretz=ignoretz, + tzinfos=tzinfos) + + def __call__(self, s, **kwargs): + return self._parse_rfc(s, **kwargs) + + +rrulestr = _rrulestr() + +# vim:ts=4:sw=4:et diff --git a/thesisenv/lib/python3.6/site-packages/dateutil/tz/__init__.py b/thesisenv/lib/python3.6/site-packages/dateutil/tz/__init__.py new file mode 100644 index 0000000..5a2d9cd --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/dateutil/tz/__init__.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- +from .tz import * +from .tz import __doc__ + +#: Convenience constant providing a :class:`tzutc()` instance +#: +#: .. 
versionadded:: 2.7.0 +UTC = tzutc() + +__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange", + "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz", + "enfold", "datetime_ambiguous", "datetime_exists", + "resolve_imaginary", "UTC", "DeprecatedTzFormatWarning"] + + +class DeprecatedTzFormatWarning(Warning): + """Warning raised when time zones are parsed from deprecated formats.""" diff --git a/thesisenv/lib/python3.6/site-packages/dateutil/tz/_common.py b/thesisenv/lib/python3.6/site-packages/dateutil/tz/_common.py new file mode 100644 index 0000000..ccabb7d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/dateutil/tz/_common.py @@ -0,0 +1,415 @@ +from six import PY3 + +from functools import wraps + +from datetime import datetime, timedelta, tzinfo + + +ZERO = timedelta(0) + +__all__ = ['tzname_in_python2', 'enfold'] + + +def tzname_in_python2(namefunc): + """Change unicode output into bytestrings in Python 2 + + tzname() API changed in Python 3. It used to return bytes, but was changed + to unicode strings + """ + def adjust_encoding(*args, **kwargs): + name = namefunc(*args, **kwargs) + if name is not None and not PY3: + name = name.encode() + + return name + + return adjust_encoding + + +# The following is adapted from Alexander Belopolsky's tz library +# https://github.com/abalkin/tz +if hasattr(datetime, 'fold'): + # This is the pre-python 3.6 fold situation + def enfold(dt, fold=1): + """ + Provides a unified interface for assigning the ``fold`` attribute to + datetimes both before and after the implementation of PEP-495. + + :param fold: + The value for the ``fold`` attribute in the returned datetime. This + should be either 0 or 1. + + :return: + Returns an object for which ``getattr(dt, 'fold', 0)`` returns + ``fold`` for all versions of Python. In versions prior to + Python 3.6, this is a ``_DatetimeWithFold`` object, which is a + subclass of :py:class:`datetime.datetime` with the ``fold`` + attribute added, if ``fold`` is 1. + + .. versionadded:: 2.6.0 + """ + return dt.replace(fold=fold) + +else: + class _DatetimeWithFold(datetime): + """ + This is a class designed to provide a PEP 495-compliant interface for + Python versions before 3.6. It is used only for dates in a fold, so + the ``fold`` attribute is fixed at ``1``. + + .. versionadded:: 2.6.0 + """ + __slots__ = () + + def replace(self, *args, **kwargs): + """ + Return a datetime with the same attributes, except for those + attributes given new values by whichever keyword arguments are + specified. Note that tzinfo=None can be specified to create a naive + datetime from an aware datetime with no conversion of date and time + data. + + This is reimplemented in ``_DatetimeWithFold`` because pypy3 will + return a ``datetime.datetime`` even if ``fold`` is unchanged. + """ + argnames = ( + 'year', 'month', 'day', 'hour', 'minute', 'second', + 'microsecond', 'tzinfo' + ) + + for arg, argname in zip(args, argnames): + if argname in kwargs: + raise TypeError('Duplicate argument: {}'.format(argname)) + + kwargs[argname] = arg + + for argname in argnames: + if argname not in kwargs: + kwargs[argname] = getattr(self, argname) + + dt_class = self.__class__ if kwargs.get('fold', 1) else datetime + + return dt_class(**kwargs) + + @property + def fold(self): + return 1 + + def enfold(dt, fold=1): + """ + Provides a unified interface for assigning the ``fold`` attribute to + datetimes both before and after the implementation of PEP-495. + + :param fold: + The value for the ``fold`` attribute in the returned datetime. 
This + should be either 0 or 1. + + :return: + Returns an object for which ``getattr(dt, 'fold', 0)`` returns + ``fold`` for all versions of Python. In versions prior to + Python 3.6, this is a ``_DatetimeWithFold`` object, which is a + subclass of :py:class:`datetime.datetime` with the ``fold`` + attribute added, if ``fold`` is 1. + + .. versionadded:: 2.6.0 + """ + if getattr(dt, 'fold', 0) == fold: + return dt + + args = dt.timetuple()[:6] + args += (dt.microsecond, dt.tzinfo) + + if fold: + return _DatetimeWithFold(*args) + else: + return datetime(*args) + + +def _validate_fromutc_inputs(f): + """ + The CPython version of ``fromutc`` checks that the input is a ``datetime`` + object and that ``self`` is attached as its ``tzinfo``. + """ + @wraps(f) + def fromutc(self, dt): + if not isinstance(dt, datetime): + raise TypeError("fromutc() requires a datetime argument") + if dt.tzinfo is not self: + raise ValueError("dt.tzinfo is not self") + + return f(self, dt) + + return fromutc + + +class _tzinfo(tzinfo): + """ + Base class for all ``dateutil`` ``tzinfo`` objects. + """ + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + + dt = dt.replace(tzinfo=self) + + wall_0 = enfold(dt, fold=0) + wall_1 = enfold(dt, fold=1) + + same_offset = wall_0.utcoffset() == wall_1.utcoffset() + same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None) + + return same_dt and not same_offset + + def _fold_status(self, dt_utc, dt_wall): + """ + Determine the fold status of a "wall" datetime, given a representation + of the same datetime as a (naive) UTC datetime. This is calculated based + on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all + datetimes, and that this offset is the actual number of hours separating + ``dt_utc`` and ``dt_wall``. + + :param dt_utc: + Representation of the datetime as UTC + + :param dt_wall: + Representation of the datetime as "wall time". This parameter must + either have a `fold` attribute or have a fold-naive + :class:`datetime.tzinfo` attached, otherwise the calculation may + fail. + """ + if self.is_ambiguous(dt_wall): + delta_wall = dt_wall - dt_utc + _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst())) + else: + _fold = 0 + + return _fold + + def _fold(self, dt): + return getattr(dt, 'fold', 0) + + def _fromutc(self, dt): + """ + Given a timezone-aware datetime in a given timezone, calculates a + timezone-aware datetime in a new timezone. + + Since this is the one time that we *know* we have an unambiguous + datetime object, we take this opportunity to determine whether the + datetime is ambiguous and in a "fold" state (e.g. if it's the first + occurence, chronologically, of the ambiguous datetime). + + :param dt: + A timezone-aware :class:`datetime.datetime` object. + """ + + # Re-implement the algorithm from Python's datetime.py + dtoff = dt.utcoffset() + if dtoff is None: + raise ValueError("fromutc() requires a non-None utcoffset() " + "result") + + # The original datetime.py code assumes that `dst()` defaults to + # zero during ambiguous times. PEP 495 inverts this presumption, so + # for pre-PEP 495 versions of python, we need to tweak the algorithm. 
+ dtdst = dt.dst() + if dtdst is None: + raise ValueError("fromutc() requires a non-None dst() result") + delta = dtoff - dtdst + + dt += delta + # Set fold=1 so we can default to being in the fold for + # ambiguous dates. + dtdst = enfold(dt, fold=1).dst() + if dtdst is None: + raise ValueError("fromutc(): dt.dst gave inconsistent " + "results; cannot convert") + return dt + dtdst + + @_validate_fromutc_inputs + def fromutc(self, dt): + """ + Given a timezone-aware datetime in a given timezone, calculates a + timezone-aware datetime in a new timezone. + + Since this is the one time that we *know* we have an unambiguous + datetime object, we take this opportunity to determine whether the + datetime is ambiguous and in a "fold" state (e.g. if it's the first + occurance, chronologically, of the ambiguous datetime). + + :param dt: + A timezone-aware :class:`datetime.datetime` object. + """ + dt_wall = self._fromutc(dt) + + # Calculate the fold status given the two datetimes. + _fold = self._fold_status(dt, dt_wall) + + # Set the default fold value for ambiguous dates + return enfold(dt_wall, fold=_fold) + + +class tzrangebase(_tzinfo): + """ + This is an abstract base class for time zones represented by an annual + transition into and out of DST. Child classes should implement the following + methods: + + * ``__init__(self, *args, **kwargs)`` + * ``transitions(self, year)`` - this is expected to return a tuple of + datetimes representing the DST on and off transitions in standard + time. + + A fully initialized ``tzrangebase`` subclass should also provide the + following attributes: + * ``hasdst``: Boolean whether or not the zone uses DST. + * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects + representing the respective UTC offsets. + * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short + abbreviations in DST and STD, respectively. + * ``_hasdst``: Whether or not the zone has DST. + + .. versionadded:: 2.6.0 + """ + def __init__(self): + raise NotImplementedError('tzrangebase is an abstract base class') + + def utcoffset(self, dt): + isdst = self._isdst(dt) + + if isdst is None: + return None + elif isdst: + return self._dst_offset + else: + return self._std_offset + + def dst(self, dt): + isdst = self._isdst(dt) + + if isdst is None: + return None + elif isdst: + return self._dst_base_offset + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + if self._isdst(dt): + return self._dst_abbr + else: + return self._std_abbr + + def fromutc(self, dt): + """ Given a datetime in UTC, return local time """ + if not isinstance(dt, datetime): + raise TypeError("fromutc() requires a datetime argument") + + if dt.tzinfo is not self: + raise ValueError("dt.tzinfo is not self") + + # Get transitions - if there are none, fixed offset + transitions = self.transitions(dt.year) + if transitions is None: + return dt + self.utcoffset(dt) + + # Get the transition times in UTC + dston, dstoff = transitions + + dston -= self._std_offset + dstoff -= self._std_offset + + utc_transitions = (dston, dstoff) + dt_utc = dt.replace(tzinfo=None) + + isdst = self._naive_isdst(dt_utc, utc_transitions) + + if isdst: + dt_wall = dt + self._dst_offset + else: + dt_wall = dt + self._std_offset + + _fold = int(not isdst and self.is_ambiguous(dt_wall)) + + return enfold(dt_wall, fold=_fold) + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. 
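To make the ambiguity handling concrete, here is a small sketch (illustration only; it assumes the system zoneinfo database provides America/New_York with the usual US DST rules):

from datetime import datetime
from dateutil import tz

nyc = tz.gettz('America/New_York')
wall = datetime(2018, 11, 4, 1, 30)    # repeated on the 2018 fall-back date
print(nyc.is_ambiguous(wall))                                # True
print(wall.replace(tzinfo=nyc).tzname())                     # 'EDT' (fold=0)
print(tz.enfold(wall.replace(tzinfo=nyc), fold=1).tzname())  # 'EST' (fold=1)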
+ + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + if not self.hasdst: + return False + + start, end = self.transitions(dt.year) + + dt = dt.replace(tzinfo=None) + return (end <= dt < end + self._dst_base_offset) + + def _isdst(self, dt): + if not self.hasdst: + return False + elif dt is None: + return None + + transitions = self.transitions(dt.year) + + if transitions is None: + return False + + dt = dt.replace(tzinfo=None) + + isdst = self._naive_isdst(dt, transitions) + + # Handle ambiguous dates + if not isdst and self.is_ambiguous(dt): + return not self._fold(dt) + else: + return isdst + + def _naive_isdst(self, dt, transitions): + dston, dstoff = transitions + + dt = dt.replace(tzinfo=None) + + if dston < dstoff: + isdst = dston <= dt < dstoff + else: + isdst = not dstoff <= dt < dston + + return isdst + + @property + def _dst_base_offset(self): + return self._dst_offset - self._std_offset + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s(...)" % self.__class__.__name__ + + __reduce__ = object.__reduce__ diff --git a/thesisenv/lib/python3.6/site-packages/dateutil/tz/_factories.py b/thesisenv/lib/python3.6/site-packages/dateutil/tz/_factories.py new file mode 100644 index 0000000..de2e0c1 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/dateutil/tz/_factories.py @@ -0,0 +1,49 @@ +from datetime import timedelta + + +class _TzSingleton(type): + def __init__(cls, *args, **kwargs): + cls.__instance = None + super(_TzSingleton, cls).__init__(*args, **kwargs) + + def __call__(cls): + if cls.__instance is None: + cls.__instance = super(_TzSingleton, cls).__call__() + return cls.__instance + +class _TzFactory(type): + def instance(cls, *args, **kwargs): + """Alternate constructor that returns a fresh instance""" + return type.__call__(cls, *args, **kwargs) + + +class _TzOffsetFactory(_TzFactory): + def __init__(cls, *args, **kwargs): + cls.__instances = {} + + def __call__(cls, name, offset): + if isinstance(offset, timedelta): + key = (name, offset.total_seconds()) + else: + key = (name, offset) + + instance = cls.__instances.get(key, None) + if instance is None: + instance = cls.__instances.setdefault(key, + cls.instance(name, offset)) + return instance + + +class _TzStrFactory(_TzFactory): + def __init__(cls, *args, **kwargs): + cls.__instances = {} + + def __call__(cls, s, posix_offset=False): + key = (s, posix_offset) + instance = cls.__instances.get(key, None) + + if instance is None: + instance = cls.__instances.setdefault(key, + cls.instance(s, posix_offset)) + return instance + diff --git a/thesisenv/lib/python3.6/site-packages/dateutil/tz/tz.py b/thesisenv/lib/python3.6/site-packages/dateutil/tz/tz.py new file mode 100644 index 0000000..ac82b9c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/dateutil/tz/tz.py @@ -0,0 +1,1785 @@ +# -*- coding: utf-8 -*- +""" +This module offers timezone implementations subclassing the abstract +:py:class:`datetime.tzinfo` type. There are classes to handle tzfile format +files (usually are in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`, +etc), TZ environment string (in all known formats), given ranges (with help +from relative deltas), local machine timezone, fixed offset timezone, and UTC +timezone. 
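As a quick orientation sketch for the classes defined below (the zone name and offset are arbitrary assumptions): tzutc() provides the singleton UTC zone and tzoffset a fixed offset from UTC.

from datetime import datetime, timedelta
from dateutil import tz

now_utc = datetime.now(tz.tzutc())                        # UTC, singleton
ist = tz.tzoffset('IST', timedelta(hours=5, minutes=30))  # fixed +05:30
print(now_utc.astimezone(ist).tzname())                   # 'IST'
print(tz.tzutc() is tz.tzutc())                           # True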
+""" +import datetime +import struct +import time +import sys +import os +import bisect + +import six +from six import string_types +from six.moves import _thread +from ._common import tzname_in_python2, _tzinfo +from ._common import tzrangebase, enfold +from ._common import _validate_fromutc_inputs + +from ._factories import _TzSingleton, _TzOffsetFactory +from ._factories import _TzStrFactory +try: + from .win import tzwin, tzwinlocal +except ImportError: + tzwin = tzwinlocal = None + +ZERO = datetime.timedelta(0) +EPOCH = datetime.datetime.utcfromtimestamp(0) +EPOCHORDINAL = EPOCH.toordinal() + + +@six.add_metaclass(_TzSingleton) +class tzutc(datetime.tzinfo): + """ + This is a tzinfo object that represents the UTC time zone. + + **Examples:** + + .. doctest:: + + >>> from datetime import * + >>> from dateutil.tz import * + + >>> datetime.now() + datetime.datetime(2003, 9, 27, 9, 40, 1, 521290) + + >>> datetime.now(tzutc()) + datetime.datetime(2003, 9, 27, 12, 40, 12, 156379, tzinfo=tzutc()) + + >>> datetime.now(tzutc()).tzname() + 'UTC' + + .. versionchanged:: 2.7.0 + ``tzutc()`` is now a singleton, so the result of ``tzutc()`` will + always return the same object. + + .. doctest:: + + >>> from dateutil.tz import tzutc, UTC + >>> tzutc() is tzutc() + True + >>> tzutc() is UTC + True + """ + def utcoffset(self, dt): + return ZERO + + def dst(self, dt): + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return "UTC" + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + return False + + @_validate_fromutc_inputs + def fromutc(self, dt): + """ + Fast track version of fromutc() returns the original ``dt`` object for + any valid :py:class:`datetime.datetime` object. + """ + return dt + + def __eq__(self, other): + if not isinstance(other, (tzutc, tzoffset)): + return NotImplemented + + return (isinstance(other, tzutc) or + (isinstance(other, tzoffset) and other._offset == ZERO)) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + + +@six.add_metaclass(_TzOffsetFactory) +class tzoffset(datetime.tzinfo): + """ + A simple class for representing a fixed offset from UTC. + + :param name: + The timezone name, to be returned when ``tzname()`` is called. + :param offset: + The time zone offset in seconds, or (since version 2.6.0, represented + as a :py:class:`datetime.timedelta` object). + """ + def __init__(self, name, offset): + self._name = name + + try: + # Allow a timedelta + offset = offset.total_seconds() + except (TypeError, AttributeError): + pass + self._offset = datetime.timedelta(seconds=offset) + + def utcoffset(self, dt): + return self._offset + + def dst(self, dt): + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return self._name + + @_validate_fromutc_inputs + def fromutc(self, dt): + return dt + self._offset + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. 
versionadded:: 2.6.0 + """ + return False + + def __eq__(self, other): + if not isinstance(other, tzoffset): + return NotImplemented + + return self._offset == other._offset + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s(%s, %s)" % (self.__class__.__name__, + repr(self._name), + int(self._offset.total_seconds())) + + __reduce__ = object.__reduce__ + + +class tzlocal(_tzinfo): + """ + A :class:`tzinfo` subclass built around the ``time`` timezone functions. + """ + def __init__(self): + super(tzlocal, self).__init__() + + self._std_offset = datetime.timedelta(seconds=-time.timezone) + if time.daylight: + self._dst_offset = datetime.timedelta(seconds=-time.altzone) + else: + self._dst_offset = self._std_offset + + self._dst_saved = self._dst_offset - self._std_offset + self._hasdst = bool(self._dst_saved) + self._tznames = tuple(time.tzname) + + def utcoffset(self, dt): + if dt is None and self._hasdst: + return None + + if self._isdst(dt): + return self._dst_offset + else: + return self._std_offset + + def dst(self, dt): + if dt is None and self._hasdst: + return None + + if self._isdst(dt): + return self._dst_offset - self._std_offset + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return self._tznames[self._isdst(dt)] + + def is_ambiguous(self, dt): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + naive_dst = self._naive_is_dst(dt) + return (not naive_dst and + (naive_dst != self._naive_is_dst(dt - self._dst_saved))) + + def _naive_is_dst(self, dt): + timestamp = _datetime_to_timestamp(dt) + return time.localtime(timestamp + time.timezone).tm_isdst + + def _isdst(self, dt, fold_naive=True): + # We can't use mktime here. It is unstable when deciding if + # the hour near to a change is DST or not. 
+ # + # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour, + # dt.minute, dt.second, dt.weekday(), 0, -1)) + # return time.localtime(timestamp).tm_isdst + # + # The code above yields the following result: + # + # >>> import tz, datetime + # >>> t = tz.tzlocal() + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRDT' + # >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname() + # 'BRST' + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRST' + # >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname() + # 'BRDT' + # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() + # 'BRDT' + # + # Here is a more stable implementation: + # + if not self._hasdst: + return False + + # Check for ambiguous times: + dstval = self._naive_is_dst(dt) + fold = getattr(dt, 'fold', None) + + if self.is_ambiguous(dt): + if fold is not None: + return not self._fold(dt) + else: + return True + + return dstval + + def __eq__(self, other): + if isinstance(other, tzlocal): + return (self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset) + elif isinstance(other, tzutc): + return (not self._hasdst and + self._tznames[0] in {'UTC', 'GMT'} and + self._std_offset == ZERO) + elif isinstance(other, tzoffset): + return (not self._hasdst and + self._tznames[0] == other._name and + self._std_offset == other._offset) + else: + return NotImplemented + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s()" % self.__class__.__name__ + + __reduce__ = object.__reduce__ + + +class _ttinfo(object): + __slots__ = ["offset", "delta", "isdst", "abbr", + "isstd", "isgmt", "dstoffset"] + + def __init__(self): + for attr in self.__slots__: + setattr(self, attr, None) + + def __repr__(self): + l = [] + for attr in self.__slots__: + value = getattr(self, attr) + if value is not None: + l.append("%s=%s" % (attr, repr(value))) + return "%s(%s)" % (self.__class__.__name__, ", ".join(l)) + + def __eq__(self, other): + if not isinstance(other, _ttinfo): + return NotImplemented + + return (self.offset == other.offset and + self.delta == other.delta and + self.isdst == other.isdst and + self.abbr == other.abbr and + self.isstd == other.isstd and + self.isgmt == other.isgmt and + self.dstoffset == other.dstoffset) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __getstate__(self): + state = {} + for name in self.__slots__: + state[name] = getattr(self, name, None) + return state + + def __setstate__(self, state): + for name in self.__slots__: + if name in state: + setattr(self, name, state[name]) + + +class _tzfile(object): + """ + Lightweight class for holding the relevant transition and time zone + information read from binary tzfiles. + """ + attrs = ['trans_list', 'trans_list_utc', 'trans_idx', 'ttinfo_list', + 'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first'] + + def __init__(self, **kwargs): + for attr in self.attrs: + setattr(self, attr, kwargs.get(attr, None)) + + +class tzfile(_tzinfo): + """ + This is a ``tzinfo`` subclass thant allows one to use the ``tzfile(5)`` + format timezone files to extract current and historical zone information. + + :param fileobj: + This can be an opened file stream or a file name that the time zone + information can be read from. + + :param filename: + This is an optional parameter specifying the source of the time zone + information in the event that ``fileobj`` is a file object. 
If omitted + and ``fileobj`` is a file stream, this parameter will be set either to + ``fileobj``'s ``name`` attribute or to ``repr(fileobj)``. + + See `Sources for Time Zone and Daylight Saving Time Data + `_ for more information. + Time zone files can be compiled from the `IANA Time Zone database files + `_ with the `zic time zone compiler + `_ + + .. note:: + + Only construct a ``tzfile`` directly if you have a specific timezone + file on disk that you want to read into a Python ``tzinfo`` object. + If you want to get a ``tzfile`` representing a specific IANA zone, + (e.g. ``'America/New_York'``), you should call + :func:`dateutil.tz.gettz` with the zone identifier. + + + **Examples:** + + Using the US Eastern time zone as an example, we can see that a ``tzfile`` + provides time zone information for the standard Daylight Saving offsets: + + .. testsetup:: tzfile + + from dateutil.tz import gettz + from datetime import datetime + + .. doctest:: tzfile + + >>> NYC = gettz('America/New_York') + >>> NYC + tzfile('/usr/share/zoneinfo/America/New_York') + + >>> print(datetime(2016, 1, 3, tzinfo=NYC)) # EST + 2016-01-03 00:00:00-05:00 + + >>> print(datetime(2016, 7, 7, tzinfo=NYC)) # EDT + 2016-07-07 00:00:00-04:00 + + + The ``tzfile`` structure contains a fully history of the time zone, + so historical dates will also have the right offsets. For example, before + the adoption of the UTC standards, New York used local solar mean time: + + .. doctest:: tzfile + + >>> print(datetime(1901, 4, 12, tzinfo=NYC)) # LMT + 1901-04-12 00:00:00-04:56 + + And during World War II, New York was on "Eastern War Time", which was a + state of permanent daylight saving time: + + .. doctest:: tzfile + + >>> print(datetime(1944, 2, 7, tzinfo=NYC)) # EWT + 1944-02-07 00:00:00-04:00 + + """ + + def __init__(self, fileobj, filename=None): + super(tzfile, self).__init__() + + file_opened_here = False + if isinstance(fileobj, string_types): + self._filename = fileobj + fileobj = open(fileobj, 'rb') + file_opened_here = True + elif filename is not None: + self._filename = filename + elif hasattr(fileobj, "name"): + self._filename = fileobj.name + else: + self._filename = repr(fileobj) + + if fileobj is not None: + if not file_opened_here: + fileobj = _ContextWrapper(fileobj) + + with fileobj as file_stream: + tzobj = self._read_tzfile(file_stream) + + self._set_tzdata(tzobj) + + def _set_tzdata(self, tzobj): + """ Set the time zone data of this object from a _tzfile object """ + # Copy the relevant attributes over as private attributes + for attr in _tzfile.attrs: + setattr(self, '_' + attr, getattr(tzobj, attr)) + + def _read_tzfile(self, fileobj): + out = _tzfile() + + # From tzfile(5): + # + # The time zone information files used by tzset(3) + # begin with the magic characters "TZif" to identify + # them as time zone information files, followed by + # sixteen bytes reserved for future use, followed by + # six four-byte values of type long, written in a + # ``standard'' byte order (the high-order byte + # of the value is written first). + if fileobj.read(4).decode() != "TZif": + raise ValueError("magic not found") + + fileobj.read(16) + + ( + # The number of UTC/local indicators stored in the file. + ttisgmtcnt, + + # The number of standard/wall indicators stored in the file. + ttisstdcnt, + + # The number of leap seconds for which data is + # stored in the file. + leapcnt, + + # The number of "transition times" for which data + # is stored in the file. 
+ timecnt, + + # The number of "local time types" for which data + # is stored in the file (must not be zero). + typecnt, + + # The number of characters of "time zone + # abbreviation strings" stored in the file. + charcnt, + + ) = struct.unpack(">6l", fileobj.read(24)) + + # The above header is followed by tzh_timecnt four-byte + # values of type long, sorted in ascending order. + # These values are written in ``standard'' byte order. + # Each is used as a transition time (as returned by + # time(2)) at which the rules for computing local time + # change. + + if timecnt: + out.trans_list_utc = list(struct.unpack(">%dl" % timecnt, + fileobj.read(timecnt*4))) + else: + out.trans_list_utc = [] + + # Next come tzh_timecnt one-byte values of type unsigned + # char; each one tells which of the different types of + # ``local time'' types described in the file is associated + # with the same-indexed transition time. These values + # serve as indices into an array of ttinfo structures that + # appears next in the file. + + if timecnt: + out.trans_idx = struct.unpack(">%dB" % timecnt, + fileobj.read(timecnt)) + else: + out.trans_idx = [] + + # Each ttinfo structure is written as a four-byte value + # for tt_gmtoff of type long, in a standard byte + # order, followed by a one-byte value for tt_isdst + # and a one-byte value for tt_abbrind. In each + # structure, tt_gmtoff gives the number of + # seconds to be added to UTC, tt_isdst tells whether + # tm_isdst should be set by localtime(3), and + # tt_abbrind serves as an index into the array of + # time zone abbreviation characters that follow the + # ttinfo structure(s) in the file. + + ttinfo = [] + + for i in range(typecnt): + ttinfo.append(struct.unpack(">lbb", fileobj.read(6))) + + abbr = fileobj.read(charcnt).decode() + + # Then there are tzh_leapcnt pairs of four-byte + # values, written in standard byte order; the + # first value of each pair gives the time (as + # returned by time(2)) at which a leap second + # occurs; the second gives the total number of + # leap seconds to be applied after the given time. + # The pairs of values are sorted in ascending order + # by time. + + # Not used, for now (but seek for correct file position) + if leapcnt: + fileobj.seek(leapcnt * 8, os.SEEK_CUR) + + # Then there are tzh_ttisstdcnt standard/wall + # indicators, each stored as a one-byte value; + # they tell whether the transition times associated + # with local time types were specified as standard + # time or wall clock time, and are used when + # a time zone file is used in handling POSIX-style + # time zone environment variables. + + if ttisstdcnt: + isstd = struct.unpack(">%db" % ttisstdcnt, + fileobj.read(ttisstdcnt)) + + # Finally, there are tzh_ttisgmtcnt UTC/local + # indicators, each stored as a one-byte value; + # they tell whether the transition times associated + # with local time types were specified as UTC or + # local time, and are used when a time zone file + # is used in handling POSIX-style time zone envi- + # ronment variables. + + if ttisgmtcnt: + isgmt = struct.unpack(">%db" % ttisgmtcnt, + fileobj.read(ttisgmtcnt)) + + # Build ttinfo list + out.ttinfo_list = [] + for i in range(typecnt): + gmtoff, isdst, abbrind = ttinfo[i] + # Round to full-minutes if that's not the case. Python's + # datetime doesn't accept sub-minute timezones. Check + # http://python.org/sf/1447945 for some information. 
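+            # For example, a historical local-mean-time offset of -17762
+            # seconds (-4:56:02) becomes -17760 seconds (-04:56) here.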
+ gmtoff = 60 * ((gmtoff + 30) // 60) + tti = _ttinfo() + tti.offset = gmtoff + tti.dstoffset = datetime.timedelta(0) + tti.delta = datetime.timedelta(seconds=gmtoff) + tti.isdst = isdst + tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)] + tti.isstd = (ttisstdcnt > i and isstd[i] != 0) + tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0) + out.ttinfo_list.append(tti) + + # Replace ttinfo indexes for ttinfo objects. + out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx] + + # Set standard, dst, and before ttinfos. before will be + # used when a given time is before any transitions, + # and will be set to the first non-dst ttinfo, or to + # the first dst, if all of them are dst. + out.ttinfo_std = None + out.ttinfo_dst = None + out.ttinfo_before = None + if out.ttinfo_list: + if not out.trans_list_utc: + out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0] + else: + for i in range(timecnt-1, -1, -1): + tti = out.trans_idx[i] + if not out.ttinfo_std and not tti.isdst: + out.ttinfo_std = tti + elif not out.ttinfo_dst and tti.isdst: + out.ttinfo_dst = tti + + if out.ttinfo_std and out.ttinfo_dst: + break + else: + if out.ttinfo_dst and not out.ttinfo_std: + out.ttinfo_std = out.ttinfo_dst + + for tti in out.ttinfo_list: + if not tti.isdst: + out.ttinfo_before = tti + break + else: + out.ttinfo_before = out.ttinfo_list[0] + + # Now fix transition times to become relative to wall time. + # + # I'm not sure about this. In my tests, the tz source file + # is setup to wall time, and in the binary file isstd and + # isgmt are off, so it should be in wall time. OTOH, it's + # always in gmt time. Let me know if you have comments + # about this. + laststdoffset = None + out.trans_list = [] + for i, tti in enumerate(out.trans_idx): + if not tti.isdst: + offset = tti.offset + laststdoffset = offset + else: + if laststdoffset is not None: + # Store the DST offset as well and update it in the list + tti.dstoffset = tti.offset - laststdoffset + out.trans_idx[i] = tti + + offset = laststdoffset or 0 + + out.trans_list.append(out.trans_list_utc[i] + offset) + + # In case we missed any DST offsets on the way in for some reason, make + # a second pass over the list, looking for the /next/ DST offset. + laststdoffset = None + for i in reversed(range(len(out.trans_idx))): + tti = out.trans_idx[i] + if tti.isdst: + if not (tti.dstoffset or laststdoffset is None): + tti.dstoffset = tti.offset - laststdoffset + else: + laststdoffset = tti.offset + + if not isinstance(tti.dstoffset, datetime.timedelta): + tti.dstoffset = datetime.timedelta(seconds=tti.dstoffset) + + out.trans_idx[i] = tti + + out.trans_idx = tuple(out.trans_idx) + out.trans_list = tuple(out.trans_list) + out.trans_list_utc = tuple(out.trans_list_utc) + + return out + + def _find_last_transition(self, dt, in_utc=False): + # If there's no list, there are no transitions to find + if not self._trans_list: + return None + + timestamp = _datetime_to_timestamp(dt) + + # Find where the timestamp fits in the transition list - if the + # timestamp is a transition time, it's part of the "after" period. 
+ trans_list = self._trans_list_utc if in_utc else self._trans_list + idx = bisect.bisect_right(trans_list, timestamp) + + # We want to know when the previous transition was, so subtract off 1 + return idx - 1 + + def _get_ttinfo(self, idx): + # For no list or after the last transition, default to _ttinfo_std + if idx is None or (idx + 1) >= len(self._trans_list): + return self._ttinfo_std + + # If there is a list and the time is before it, return _ttinfo_before + if idx < 0: + return self._ttinfo_before + + return self._trans_idx[idx] + + def _find_ttinfo(self, dt): + idx = self._resolve_ambiguous_time(dt) + + return self._get_ttinfo(idx) + + def fromutc(self, dt): + """ + The ``tzfile`` implementation of :py:func:`datetime.tzinfo.fromutc`. + + :param dt: + A :py:class:`datetime.datetime` object. + + :raises TypeError: + Raised if ``dt`` is not a :py:class:`datetime.datetime` object. + + :raises ValueError: + Raised if this is called with a ``dt`` which does not have this + ``tzinfo`` attached. + + :return: + Returns a :py:class:`datetime.datetime` object representing the + wall time in ``self``'s time zone. + """ + # These isinstance checks are in datetime.tzinfo, so we'll preserve + # them, even if we don't care about duck typing. + if not isinstance(dt, datetime.datetime): + raise TypeError("fromutc() requires a datetime argument") + + if dt.tzinfo is not self: + raise ValueError("dt.tzinfo is not self") + + # First treat UTC as wall time and get the transition we're in. + idx = self._find_last_transition(dt, in_utc=True) + tti = self._get_ttinfo(idx) + + dt_out = dt + datetime.timedelta(seconds=tti.offset) + + fold = self.is_ambiguous(dt_out, idx=idx) + + return enfold(dt_out, fold=int(fold)) + + def is_ambiguous(self, dt, idx=None): + """ + Whether or not the "wall time" of a given datetime is ambiguous in this + zone. + + :param dt: + A :py:class:`datetime.datetime`, naive or time zone aware. + + + :return: + Returns ``True`` if ambiguous, ``False`` otherwise. + + .. versionadded:: 2.6.0 + """ + if idx is None: + idx = self._find_last_transition(dt) + + # Calculate the difference in offsets from current to previous + timestamp = _datetime_to_timestamp(dt) + tti = self._get_ttinfo(idx) + + if idx is None or idx <= 0: + return False + + od = self._get_ttinfo(idx - 1).offset - tti.offset + tt = self._trans_list[idx] # Transition time + + return timestamp < tt + od + + def _resolve_ambiguous_time(self, dt): + idx = self._find_last_transition(dt) + + # If we have no transitions, return the index + _fold = self._fold(dt) + if idx is None or idx == 0: + return idx + + # If it's ambiguous and we're in a fold, shift to a different index. + idx_offset = int(not _fold and self.is_ambiguous(dt, idx)) + + return idx - idx_offset + + def utcoffset(self, dt): + if dt is None: + return None + + if not self._ttinfo_std: + return ZERO + + return self._find_ttinfo(dt).delta + + def dst(self, dt): + if dt is None: + return None + + if not self._ttinfo_dst: + return ZERO + + tti = self._find_ttinfo(dt) + + if not tti.isdst: + return ZERO + + # The documentation says that utcoffset()-dst() must + # be constant for every dt. 
+ return tti.dstoffset + + @tzname_in_python2 + def tzname(self, dt): + if not self._ttinfo_std or dt is None: + return None + return self._find_ttinfo(dt).abbr + + def __eq__(self, other): + if not isinstance(other, tzfile): + return NotImplemented + return (self._trans_list == other._trans_list and + self._trans_idx == other._trans_idx and + self._ttinfo_list == other._ttinfo_list) + + __hash__ = None + + def __ne__(self, other): + return not (self == other) + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._filename)) + + def __reduce__(self): + return self.__reduce_ex__(None) + + def __reduce_ex__(self, protocol): + return (self.__class__, (None, self._filename), self.__dict__) + + +class tzrange(tzrangebase): + """ + The ``tzrange`` object is a time zone specified by a set of offsets and + abbreviations, equivalent to the way the ``TZ`` variable can be specified + in POSIX-like systems, but using Python delta objects to specify DST + start, end and offsets. + + :param stdabbr: + The abbreviation for standard time (e.g. ``'EST'``). + + :param stdoffset: + An integer or :class:`datetime.timedelta` object or equivalent + specifying the base offset from UTC. + + If unspecified, +00:00 is used. + + :param dstabbr: + The abbreviation for DST / "Summer" time (e.g. ``'EDT'``). + + If specified, with no other DST information, DST is assumed to occur + and the default behavior or ``dstoffset``, ``start`` and ``end`` is + used. If unspecified and no other DST information is specified, it + is assumed that this zone has no DST. + + If this is unspecified and other DST information is *is* specified, + DST occurs in the zone but the time zone abbreviation is left + unchanged. + + :param dstoffset: + A an integer or :class:`datetime.timedelta` object or equivalent + specifying the UTC offset during DST. If unspecified and any other DST + information is specified, it is assumed to be the STD offset +1 hour. + + :param start: + A :class:`relativedelta.relativedelta` object or equivalent specifying + the time and time of year that daylight savings time starts. To + specify, for example, that DST starts at 2AM on the 2nd Sunday in + March, pass: + + ``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))`` + + If unspecified and any other DST information is specified, the default + value is 2 AM on the first Sunday in April. + + :param end: + A :class:`relativedelta.relativedelta` object or equivalent + representing the time and time of year that daylight savings time + ends, with the same specification method as in ``start``. One note is + that this should point to the first time in the *standard* zone, so if + a transition occurs at 2AM in the DST zone and the clocks are set back + 1 hour to 1AM, set the ``hours`` parameter to +1. + + + **Examples:** + + .. testsetup:: tzrange + + from dateutil.tz import tzrange, tzstr + + .. doctest:: tzrange + + >>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT") + True + + >>> from dateutil.relativedelta import * + >>> range1 = tzrange("EST", -18000, "EDT") + >>> range2 = tzrange("EST", -18000, "EDT", -14400, + ... relativedelta(hours=+2, month=4, day=1, + ... weekday=SU(+1)), + ... relativedelta(hours=+1, month=10, day=31, + ... 
weekday=SU(-1))) + >>> tzstr('EST5EDT') == range1 == range2 + True + + """ + def __init__(self, stdabbr, stdoffset=None, + dstabbr=None, dstoffset=None, + start=None, end=None): + + global relativedelta + from dateutil import relativedelta + + self._std_abbr = stdabbr + self._dst_abbr = dstabbr + + try: + stdoffset = stdoffset.total_seconds() + except (TypeError, AttributeError): + pass + + try: + dstoffset = dstoffset.total_seconds() + except (TypeError, AttributeError): + pass + + if stdoffset is not None: + self._std_offset = datetime.timedelta(seconds=stdoffset) + else: + self._std_offset = ZERO + + if dstoffset is not None: + self._dst_offset = datetime.timedelta(seconds=dstoffset) + elif dstabbr and stdoffset is not None: + self._dst_offset = self._std_offset + datetime.timedelta(hours=+1) + else: + self._dst_offset = ZERO + + if dstabbr and start is None: + self._start_delta = relativedelta.relativedelta( + hours=+2, month=4, day=1, weekday=relativedelta.SU(+1)) + else: + self._start_delta = start + + if dstabbr and end is None: + self._end_delta = relativedelta.relativedelta( + hours=+1, month=10, day=31, weekday=relativedelta.SU(-1)) + else: + self._end_delta = end + + self._dst_base_offset_ = self._dst_offset - self._std_offset + self.hasdst = bool(self._start_delta) + + def transitions(self, year): + """ + For a given year, get the DST on and off transition times, expressed + always on the standard time side. For zones with no transitions, this + function returns ``None``. + + :param year: + The year whose transitions you would like to query. + + :return: + Returns a :class:`tuple` of :class:`datetime.datetime` objects, + ``(dston, dstoff)`` for zones with an annual DST transition, or + ``None`` for fixed offset zones. + """ + if not self.hasdst: + return None + + base_year = datetime.datetime(year, 1, 1) + + start = base_year + self._start_delta + end = base_year + self._end_delta + + return (start, end) + + def __eq__(self, other): + if not isinstance(other, tzrange): + return NotImplemented + + return (self._std_abbr == other._std_abbr and + self._dst_abbr == other._dst_abbr and + self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset and + self._start_delta == other._start_delta and + self._end_delta == other._end_delta) + + @property + def _dst_base_offset(self): + return self._dst_base_offset_ + + +@six.add_metaclass(_TzStrFactory) +class tzstr(tzrange): + """ + ``tzstr`` objects are time zone objects specified by a time-zone string as + it would be passed to a ``TZ`` variable on POSIX-style systems (see + the `GNU C Library: TZ Variable`_ for more details). + + There is one notable exception, which is that POSIX-style time zones use an + inverted offset format, so normally ``GMT+3`` would be parsed as an offset + 3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an + offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX + behavior, pass a ``True`` value to ``posix_offset``. + + The :class:`tzrange` object provides the same functionality, but is + specified using :class:`relativedelta.relativedelta` objects. rather than + strings. + + :param s: + A time zone string in ``TZ`` variable format. This can be a + :class:`bytes` (2.x: :class:`str`), :class:`str` (2.x: + :class:`unicode`) or a stream emitting unicode characters + (e.g. :class:`StringIO`). + + :param posix_offset: + Optional. 
If set to ``True``, interpret strings such as ``GMT+3`` or + ``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the + POSIX standard. + + .. caution:: + + Prior to version 2.7.0, this function also supported time zones + in the format: + + * ``EST5EDT,4,0,6,7200,10,0,26,7200,3600`` + * ``EST5EDT,4,1,0,7200,10,-1,0,7200,3600`` + + This format is non-standard and has been deprecated; this function + will raise a :class:`DeprecatedTZFormatWarning` until + support is removed in a future version. + + .. _`GNU C Library: TZ Variable`: + https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html + """ + def __init__(self, s, posix_offset=False): + global parser + from dateutil.parser import _parser as parser + + self._s = s + + res = parser._parsetz(s) + if res is None or res.any_unused_tokens: + raise ValueError("unknown string format") + + # Here we break the compatibility with the TZ variable handling. + # GMT-3 actually *means* the timezone -3. + if res.stdabbr in ("GMT", "UTC") and not posix_offset: + res.stdoffset *= -1 + + # We must initialize it first, since _delta() needs + # _std_offset and _dst_offset set. Use False in start/end + # to avoid building it two times. + tzrange.__init__(self, res.stdabbr, res.stdoffset, + res.dstabbr, res.dstoffset, + start=False, end=False) + + if not res.dstabbr: + self._start_delta = None + self._end_delta = None + else: + self._start_delta = self._delta(res.start) + if self._start_delta: + self._end_delta = self._delta(res.end, isend=1) + + self.hasdst = bool(self._start_delta) + + def _delta(self, x, isend=0): + from dateutil import relativedelta + kwargs = {} + if x.month is not None: + kwargs["month"] = x.month + if x.weekday is not None: + kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week) + if x.week > 0: + kwargs["day"] = 1 + else: + kwargs["day"] = 31 + elif x.day: + kwargs["day"] = x.day + elif x.yday is not None: + kwargs["yearday"] = x.yday + elif x.jyday is not None: + kwargs["nlyearday"] = x.jyday + if not kwargs: + # Default is to start on first sunday of april, and end + # on last sunday of october. + if not isend: + kwargs["month"] = 4 + kwargs["day"] = 1 + kwargs["weekday"] = relativedelta.SU(+1) + else: + kwargs["month"] = 10 + kwargs["day"] = 31 + kwargs["weekday"] = relativedelta.SU(-1) + if x.time is not None: + kwargs["seconds"] = x.time + else: + # Default is 2AM. + kwargs["seconds"] = 7200 + if isend: + # Convert to standard time, to follow the documented way + # of working with the extra hour. See the documentation + # of the tzinfo class. 
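# Usage sketch for the tzstr semantics described above (the offsets used are
# simply the familiar US Eastern values, chosen for illustration):
from datetime import datetime, timedelta
from dateutil import tz

est5edt = tz.tzstr("EST5EDT,M3.2.0/2,M11.1.0/2")   # GNU TZ-style specification
assert datetime(2018, 1, 15, tzinfo=est5edt).utcoffset() == timedelta(hours=-5)
assert datetime(2018, 7, 15, tzinfo=est5edt).utcoffset() == timedelta(hours=-4)

# "GMT+3" is read as 3 hours *ahead* of UTC unless posix_offset=True:
assert datetime(2018, 1, 1, tzinfo=tz.tzstr("GMT+3")).utcoffset() == timedelta(hours=3)
assert (datetime(2018, 1, 1, tzinfo=tz.tzstr("GMT+3", posix_offset=True)).utcoffset()
        == timedelta(hours=-3))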
+ delta = self._dst_offset - self._std_offset + kwargs["seconds"] -= delta.seconds + delta.days * 86400 + return relativedelta.relativedelta(**kwargs) + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._s)) + + +class _tzicalvtzcomp(object): + def __init__(self, tzoffsetfrom, tzoffsetto, isdst, + tzname=None, rrule=None): + self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom) + self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto) + self.tzoffsetdiff = self.tzoffsetto - self.tzoffsetfrom + self.isdst = isdst + self.tzname = tzname + self.rrule = rrule + + +class _tzicalvtz(_tzinfo): + def __init__(self, tzid, comps=[]): + super(_tzicalvtz, self).__init__() + + self._tzid = tzid + self._comps = comps + self._cachedate = [] + self._cachecomp = [] + self._cache_lock = _thread.allocate_lock() + + def _find_comp(self, dt): + if len(self._comps) == 1: + return self._comps[0] + + dt = dt.replace(tzinfo=None) + + try: + with self._cache_lock: + return self._cachecomp[self._cachedate.index( + (dt, self._fold(dt)))] + except ValueError: + pass + + lastcompdt = None + lastcomp = None + + for comp in self._comps: + compdt = self._find_compdt(comp, dt) + + if compdt and (not lastcompdt or lastcompdt < compdt): + lastcompdt = compdt + lastcomp = comp + + if not lastcomp: + # RFC says nothing about what to do when a given + # time is before the first onset date. We'll look for the + # first standard component, or the first component, if + # none is found. + for comp in self._comps: + if not comp.isdst: + lastcomp = comp + break + else: + lastcomp = comp[0] + + with self._cache_lock: + self._cachedate.insert(0, (dt, self._fold(dt))) + self._cachecomp.insert(0, lastcomp) + + if len(self._cachedate) > 10: + self._cachedate.pop() + self._cachecomp.pop() + + return lastcomp + + def _find_compdt(self, comp, dt): + if comp.tzoffsetdiff < ZERO and self._fold(dt): + dt -= comp.tzoffsetdiff + + compdt = comp.rrule.before(dt, inc=True) + + return compdt + + def utcoffset(self, dt): + if dt is None: + return None + + return self._find_comp(dt).tzoffsetto + + def dst(self, dt): + comp = self._find_comp(dt) + if comp.isdst: + return comp.tzoffsetdiff + else: + return ZERO + + @tzname_in_python2 + def tzname(self, dt): + return self._find_comp(dt).tzname + + def __repr__(self): + return "" % repr(self._tzid) + + __reduce__ = object.__reduce__ + + +class tzical(object): + """ + This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure + as set out in `RFC 5545`_ Section 4.6.5 into one or more `tzinfo` objects. + + :param `fileobj`: + A file or stream in iCalendar format, which should be UTF-8 encoded + with CRLF endings. + + .. _`RFC 5545`: https://tools.ietf.org/html/rfc5545 + """ + def __init__(self, fileobj): + global rrule + from dateutil import rrule + + if isinstance(fileobj, string_types): + self._s = fileobj + # ical should be encoded in UTF-8 with CRLF + fileobj = open(fileobj, 'r') + else: + self._s = getattr(fileobj, 'name', repr(fileobj)) + fileobj = _ContextWrapper(fileobj) + + self._vtz = {} + + with fileobj as fobj: + self._parse_rfc(fobj.read()) + + def keys(self): + """ + Retrieves the available time zones as a list. + """ + return list(self._vtz.keys()) + + def get(self, tzid=None): + """ + Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``. + + :param tzid: + If there is exactly one time zone available, omitting ``tzid`` + or passing :py:const:`None` value returns it. 
Otherwise a valid + key (which can be retrieved from :func:`keys`) is required. + + :raises ValueError: + Raised if ``tzid`` is not specified but there are either more + or fewer than 1 zone defined. + + :returns: + Returns either a :py:class:`datetime.tzinfo` object representing + the relevant time zone or :py:const:`None` if the ``tzid`` was + not found. + """ + if tzid is None: + if len(self._vtz) == 0: + raise ValueError("no timezones defined") + elif len(self._vtz) > 1: + raise ValueError("more than one timezone available") + tzid = next(iter(self._vtz)) + + return self._vtz.get(tzid) + + def _parse_offset(self, s): + s = s.strip() + if not s: + raise ValueError("empty offset") + if s[0] in ('+', '-'): + signal = (-1, +1)[s[0] == '+'] + s = s[1:] + else: + signal = +1 + if len(s) == 4: + return (int(s[:2]) * 3600 + int(s[2:]) * 60) * signal + elif len(s) == 6: + return (int(s[:2]) * 3600 + int(s[2:4]) * 60 + int(s[4:])) * signal + else: + raise ValueError("invalid offset: " + s) + + def _parse_rfc(self, s): + lines = s.splitlines() + if not lines: + raise ValueError("empty string") + + # Unfold + i = 0 + while i < len(lines): + line = lines[i].rstrip() + if not line: + del lines[i] + elif i > 0 and line[0] == " ": + lines[i-1] += line[1:] + del lines[i] + else: + i += 1 + + tzid = None + comps = [] + invtz = False + comptype = None + for line in lines: + if not line: + continue + name, value = line.split(':', 1) + parms = name.split(';') + if not parms: + raise ValueError("empty property name") + name = parms[0].upper() + parms = parms[1:] + if invtz: + if name == "BEGIN": + if value in ("STANDARD", "DAYLIGHT"): + # Process component + pass + else: + raise ValueError("unknown component: "+value) + comptype = value + founddtstart = False + tzoffsetfrom = None + tzoffsetto = None + rrulelines = [] + tzname = None + elif name == "END": + if value == "VTIMEZONE": + if comptype: + raise ValueError("component not closed: "+comptype) + if not tzid: + raise ValueError("mandatory TZID not found") + if not comps: + raise ValueError( + "at least one component is needed") + # Process vtimezone + self._vtz[tzid] = _tzicalvtz(tzid, comps) + invtz = False + elif value == comptype: + if not founddtstart: + raise ValueError("mandatory DTSTART not found") + if tzoffsetfrom is None: + raise ValueError( + "mandatory TZOFFSETFROM not found") + if tzoffsetto is None: + raise ValueError( + "mandatory TZOFFSETFROM not found") + # Process component + rr = None + if rrulelines: + rr = rrule.rrulestr("\n".join(rrulelines), + compatible=True, + ignoretz=True, + cache=True) + comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto, + (comptype == "DAYLIGHT"), + tzname, rr) + comps.append(comp) + comptype = None + else: + raise ValueError("invalid component end: "+value) + elif comptype: + if name == "DTSTART": + # DTSTART in VTIMEZONE takes a subset of valid RRULE + # values under RFC 5545. 
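# Illustrative sketch of the VTIMEZONE parsing above: a minimal, hand-written
# component set (standard US Eastern rules, used purely as sample data) fed to
# tzical through an in-memory stream:
import io
from datetime import datetime, timedelta
from dateutil import tz

VTZ = "\r\n".join([
    "BEGIN:VTIMEZONE",
    "TZID:Example/Eastern",
    "BEGIN:STANDARD",
    "DTSTART:20071104T020000",
    "RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU",
    "TZOFFSETFROM:-0400",
    "TZOFFSETTO:-0500",
    "TZNAME:EST",
    "END:STANDARD",
    "BEGIN:DAYLIGHT",
    "DTSTART:20070311T020000",
    "RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU",
    "TZOFFSETFROM:-0500",
    "TZOFFSETTO:-0400",
    "TZNAME:EDT",
    "END:DAYLIGHT",
    "END:VTIMEZONE",
])

ical = tz.tzical(io.StringIO(VTZ))
eastern = ical.get("Example/Eastern")      # sole zone, so the tzid is optional
assert ical.keys() == ["Example/Eastern"]
assert datetime(2018, 1, 15, tzinfo=eastern).utcoffset() == timedelta(hours=-5)
assert datetime(2018, 7, 15, tzinfo=eastern).utcoffset() == timedelta(hours=-4)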
+ for parm in parms: + if parm != 'VALUE=DATE-TIME': + msg = ('Unsupported DTSTART param in ' + + 'VTIMEZONE: ' + parm) + raise ValueError(msg) + rrulelines.append(line) + founddtstart = True + elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"): + rrulelines.append(line) + elif name == "TZOFFSETFROM": + if parms: + raise ValueError( + "unsupported %s parm: %s " % (name, parms[0])) + tzoffsetfrom = self._parse_offset(value) + elif name == "TZOFFSETTO": + if parms: + raise ValueError( + "unsupported TZOFFSETTO parm: "+parms[0]) + tzoffsetto = self._parse_offset(value) + elif name == "TZNAME": + if parms: + raise ValueError( + "unsupported TZNAME parm: "+parms[0]) + tzname = value + elif name == "COMMENT": + pass + else: + raise ValueError("unsupported property: "+name) + else: + if name == "TZID": + if parms: + raise ValueError( + "unsupported TZID parm: "+parms[0]) + tzid = value + elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"): + pass + else: + raise ValueError("unsupported property: "+name) + elif name == "BEGIN" and value == "VTIMEZONE": + tzid = None + comps = [] + invtz = True + + def __repr__(self): + return "%s(%s)" % (self.__class__.__name__, repr(self._s)) + + +if sys.platform != "win32": + TZFILES = ["/etc/localtime", "localtime"] + TZPATHS = ["/usr/share/zoneinfo", + "/usr/lib/zoneinfo", + "/usr/share/lib/zoneinfo", + "/etc/zoneinfo"] +else: + TZFILES = [] + TZPATHS = [] + + +def __get_gettz(): + tzlocal_classes = (tzlocal,) + if tzwinlocal is not None: + tzlocal_classes += (tzwinlocal,) + + class GettzFunc(object): + """ + Retrieve a time zone object from a string representation + + This function is intended to retrieve the :py:class:`tzinfo` subclass + that best represents the time zone that would be used if a POSIX + `TZ variable`_ were set to the same value. + + If no argument or an empty string is passed to ``gettz``, local time + is returned: + + .. code-block:: python3 + + >>> gettz() + tzfile('/etc/localtime') + + This function is also the preferred way to map IANA tz database keys + to :class:`tzfile` objects: + + .. code-block:: python3 + + >>> gettz('Pacific/Kiritimati') + tzfile('/usr/share/zoneinfo/Pacific/Kiritimati') + + On Windows, the standard is extended to include the Windows-specific + zone names provided by the operating system: + + .. code-block:: python3 + + >>> gettz('Egypt Standard Time') + tzwin('Egypt Standard Time') + + Passing a GNU ``TZ`` style string time zone specification returns a + :class:`tzstr` object: + + .. code-block:: python3 + + >>> gettz('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3') + tzstr('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3') + + :param name: + A time zone name (IANA, or, on Windows, Windows keys), location of + a ``tzfile(5)`` zoneinfo file or ``TZ`` variable style time zone + specifier. An empty string, no argument or ``None`` is interpreted + as local time. + + :return: + Returns an instance of one of ``dateutil``'s :py:class:`tzinfo` + subclasses. + + .. versionchanged:: 2.7.0 + + After version 2.7.0, any two calls to ``gettz`` using the same + input strings will return the same object: + + .. code-block:: python3 + + >>> tz.gettz('America/Chicago') is tz.gettz('America/Chicago') + True + + In addition to improving performance, this ensures that + `"same zone" semantics`_ are used for datetimes in the same zone. + + + .. _`TZ variable`: + https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html + + .. 
_`"same zone" semantics`: + https://blog.ganssle.io/articles/2018/02/aware-datetime-arithmetic.html + """ + def __init__(self): + + self.__instances = {} + self._cache_lock = _thread.allocate_lock() + + def __call__(self, name=None): + with self._cache_lock: + rv = self.__instances.get(name, None) + + if rv is None: + rv = self.nocache(name=name) + if not (name is None or isinstance(rv, tzlocal_classes)): + # tzlocal is slightly more complicated than the other + # time zone providers because it depends on environment + # at construction time, so don't cache that. + self.__instances[name] = rv + + return rv + + def cache_clear(self): + with self._cache_lock: + self.__instances = {} + + @staticmethod + def nocache(name=None): + """A non-cached version of gettz""" + tz = None + if not name: + try: + name = os.environ["TZ"] + except KeyError: + pass + if name is None or name == ":": + for filepath in TZFILES: + if not os.path.isabs(filepath): + filename = filepath + for path in TZPATHS: + filepath = os.path.join(path, filename) + if os.path.isfile(filepath): + break + else: + continue + if os.path.isfile(filepath): + try: + tz = tzfile(filepath) + break + except (IOError, OSError, ValueError): + pass + else: + tz = tzlocal() + else: + if name.startswith(":"): + name = name[1:] + if os.path.isabs(name): + if os.path.isfile(name): + tz = tzfile(name) + else: + tz = None + else: + for path in TZPATHS: + filepath = os.path.join(path, name) + if not os.path.isfile(filepath): + filepath = filepath.replace(' ', '_') + if not os.path.isfile(filepath): + continue + try: + tz = tzfile(filepath) + break + except (IOError, OSError, ValueError): + pass + else: + tz = None + if tzwin is not None: + try: + tz = tzwin(name) + except WindowsError: + tz = None + + if not tz: + from dateutil.zoneinfo import get_zonefile_instance + tz = get_zonefile_instance().get(name) + + if not tz: + for c in name: + # name is not a tzstr unless it has at least + # one offset. For short values of "name", an + # explicit for loop seems to be the fastest way + # To determine if a string contains a digit + if c in "0123456789": + try: + tz = tzstr(name) + except ValueError: + pass + break + else: + if name in ("GMT", "UTC"): + tz = tzutc() + elif name in time.tzname: + tz = tzlocal() + return tz + + return GettzFunc() + + +gettz = __get_gettz() +del __get_gettz + + +def datetime_exists(dt, tz=None): + """ + Given a datetime and a time zone, determine whether or not a given datetime + would fall in a gap. + + :param dt: + A :class:`datetime.datetime` (whose time zone will be ignored if ``tz`` + is provided.) + + :param tz: + A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If + ``None`` or not provided, the datetime's own time zone will be used. + + :return: + Returns a boolean value whether or not the "wall time" exists in + ``tz``. + + .. versionadded:: 2.7.0 + """ + if tz is None: + if dt.tzinfo is None: + raise ValueError('Datetime is naive and no time zone provided.') + tz = dt.tzinfo + + dt = dt.replace(tzinfo=None) + + # This is essentially a test of whether or not the datetime can survive + # a round trip to UTC. + dt_rt = dt.replace(tzinfo=tz).astimezone(tzutc()).astimezone(tz) + dt_rt = dt_rt.replace(tzinfo=None) + + return dt == dt_rt + + +def datetime_ambiguous(dt, tz=None): + """ + Given a datetime and a time zone, determine whether or not a given datetime + is ambiguous (i.e if there are two times differentiated only by their DST + status). 
+ + :param dt: + A :class:`datetime.datetime` (whose time zone will be ignored if ``tz`` + is provided.) + + :param tz: + A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If + ``None`` or not provided, the datetime's own time zone will be used. + + :return: + Returns a boolean value whether or not the "wall time" is ambiguous in + ``tz``. + + .. versionadded:: 2.6.0 + """ + if tz is None: + if dt.tzinfo is None: + raise ValueError('Datetime is naive and no time zone provided.') + + tz = dt.tzinfo + + # If a time zone defines its own "is_ambiguous" function, we'll use that. + is_ambiguous_fn = getattr(tz, 'is_ambiguous', None) + if is_ambiguous_fn is not None: + try: + return tz.is_ambiguous(dt) + except Exception: + pass + + # If it doesn't come out and tell us it's ambiguous, we'll just check if + # the fold attribute has any effect on this particular date and time. + dt = dt.replace(tzinfo=tz) + wall_0 = enfold(dt, fold=0) + wall_1 = enfold(dt, fold=1) + + same_offset = wall_0.utcoffset() == wall_1.utcoffset() + same_dst = wall_0.dst() == wall_1.dst() + + return not (same_offset and same_dst) + + +def resolve_imaginary(dt): + """ + Given a datetime that may be imaginary, return an existing datetime. + + This function assumes that an imaginary datetime represents what the + wall time would be in a zone had the offset transition not occurred, so + it will always fall forward by the transition's change in offset. + + .. doctest:: + + >>> from dateutil import tz + >>> from datetime import datetime + >>> NYC = tz.gettz('America/New_York') + >>> print(tz.resolve_imaginary(datetime(2017, 3, 12, 2, 30, tzinfo=NYC))) + 2017-03-12 03:30:00-04:00 + + >>> KIR = tz.gettz('Pacific/Kiritimati') + >>> print(tz.resolve_imaginary(datetime(1995, 1, 1, 12, 30, tzinfo=KIR))) + 1995-01-02 12:30:00+14:00 + + As a note, :func:`datetime.astimezone` is guaranteed to produce a valid, + existing datetime, so a round-trip to and from UTC is sufficient to get + an extant datetime, however, this generally "falls back" to an earlier time + rather than falling forward to the STD side (though no guarantees are made + about this behavior). + + :param dt: + A :class:`datetime.datetime` which may or may not exist. + + :return: + Returns an existing :class:`datetime.datetime`. If ``dt`` was not + imaginary, the datetime returned is guaranteed to be the same object + passed to the function. + + .. versionadded:: 2.7.0 + """ + if dt.tzinfo is not None and not datetime_exists(dt): + + curr_offset = (dt + datetime.timedelta(hours=24)).utcoffset() + old_offset = (dt - datetime.timedelta(hours=24)).utcoffset() + + dt += curr_offset - old_offset + + return dt + + +def _datetime_to_timestamp(dt): + """ + Convert a :class:`datetime.datetime` object to an epoch timestamp in + seconds since January 1, 1970, ignoring the time zone. + """ + return (dt.replace(tzinfo=None) - EPOCH).total_seconds() + + +class _ContextWrapper(object): + """ + Class for wrapping contexts so that they are passed through in a + with statement. + """ + def __init__(self, context): + self.context = context + + def __enter__(self): + return self.context + + def __exit__(*args, **kwargs): + pass + +# vim:ts=4:sw=4:et diff --git a/thesisenv/lib/python3.6/site-packages/dateutil/tz/win.py b/thesisenv/lib/python3.6/site-packages/dateutil/tz/win.py new file mode 100644 index 0000000..def4353 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/dateutil/tz/win.py @@ -0,0 +1,331 @@ +# This code was originally contributed by Jeffrey Harris. 
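# Usage sketch for the datetime_exists()/datetime_ambiguous()/resolve_imaginary()
# helpers defined at the end of tz.py above (assumes the usual America/New_York
# rules):
from datetime import datetime
from dateutil import tz

nyc = tz.gettz("America/New_York")
spring_gap = datetime(2018, 3, 11, 2, 30)    # skipped when clocks jump 02:00 -> 03:00
fall_overlap = datetime(2018, 11, 4, 1, 30)  # occurs twice when clocks fall back

assert not tz.datetime_exists(spring_gap, tz=nyc)
assert tz.datetime_ambiguous(fall_overlap, tz=nyc)
assert tz.resolve_imaginary(spring_gap.replace(tzinfo=nyc)).hour == 3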
+import datetime +import struct + +from six.moves import winreg +from six import text_type + +try: + import ctypes + from ctypes import wintypes +except ValueError: + # ValueError is raised on non-Windows systems for some horrible reason. + raise ImportError("Running tzwin on non-Windows system") + +from ._common import tzrangebase + +__all__ = ["tzwin", "tzwinlocal", "tzres"] + +ONEWEEK = datetime.timedelta(7) + +TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones" +TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones" +TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation" + + +def _settzkeyname(): + handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) + try: + winreg.OpenKey(handle, TZKEYNAMENT).Close() + TZKEYNAME = TZKEYNAMENT + except WindowsError: + TZKEYNAME = TZKEYNAME9X + handle.Close() + return TZKEYNAME + + +TZKEYNAME = _settzkeyname() + + +class tzres(object): + """ + Class for accessing `tzres.dll`, which contains timezone name related + resources. + + .. versionadded:: 2.5.0 + """ + p_wchar = ctypes.POINTER(wintypes.WCHAR) # Pointer to a wide char + + def __init__(self, tzres_loc='tzres.dll'): + # Load the user32 DLL so we can load strings from tzres + user32 = ctypes.WinDLL('user32') + + # Specify the LoadStringW function + user32.LoadStringW.argtypes = (wintypes.HINSTANCE, + wintypes.UINT, + wintypes.LPWSTR, + ctypes.c_int) + + self.LoadStringW = user32.LoadStringW + self._tzres = ctypes.WinDLL(tzres_loc) + self.tzres_loc = tzres_loc + + def load_name(self, offset): + """ + Load a timezone name from a DLL offset (integer). + + >>> from dateutil.tzwin import tzres + >>> tzr = tzres() + >>> print(tzr.load_name(112)) + 'Eastern Standard Time' + + :param offset: + A positive integer value referring to a string from the tzres dll. + + ..note: + Offsets found in the registry are generally of the form + `@tzres.dll,-114`. The offset in this case if 114, not -114. + + """ + resource = self.p_wchar() + lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR) + nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0) + return resource[:nchar] + + def name_from_string(self, tzname_str): + """ + Parse strings as returned from the Windows registry into the time zone + name as defined in the registry. + + >>> from dateutil.tzwin import tzres + >>> tzr = tzres() + >>> print(tzr.name_from_string('@tzres.dll,-251')) + 'Dateline Daylight Time' + >>> print(tzr.name_from_string('Eastern Standard Time')) + 'Eastern Standard Time' + + :param tzname_str: + A timezone name string as returned from a Windows registry key. + + :return: + Returns the localized timezone string from tzres.dll if the string + is of the form `@tzres.dll,-offset`, else returns the input string. + """ + if not tzname_str.startswith('@'): + return tzname_str + + name_splt = tzname_str.split(',-') + try: + offset = int(name_splt[1]) + except: + raise ValueError("Malformed timezone string.") + + return self.load_name(offset) + + +class tzwinbase(tzrangebase): + """tzinfo class based on win32's timezones available in the registry.""" + def __init__(self): + raise NotImplementedError('tzwinbase is an abstract base class') + + def __eq__(self, other): + # Compare on all relevant dimensions, including name. 
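# Windows-only usage sketch (illustrative): the classes in this module read the
# registry keys named above; importing dateutil.tz.win on any other platform
# raises ImportError.
import datetime
from dateutil.tz.win import tzwin

print(tzwin.list()[:5])                                 # a few Windows zone names
eastern = tzwin("Eastern Standard Time")
print(eastern.tzname(datetime.datetime(2018, 7, 1)))    # typically "Eastern Daylight Time"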
+ if not isinstance(other, tzwinbase): + return NotImplemented + + return (self._std_offset == other._std_offset and + self._dst_offset == other._dst_offset and + self._stddayofweek == other._stddayofweek and + self._dstdayofweek == other._dstdayofweek and + self._stdweeknumber == other._stdweeknumber and + self._dstweeknumber == other._dstweeknumber and + self._stdhour == other._stdhour and + self._dsthour == other._dsthour and + self._stdminute == other._stdminute and + self._dstminute == other._dstminute and + self._std_abbr == other._std_abbr and + self._dst_abbr == other._dst_abbr) + + @staticmethod + def list(): + """Return a list of all time zones known to the system.""" + with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: + with winreg.OpenKey(handle, TZKEYNAME) as tzkey: + result = [winreg.EnumKey(tzkey, i) + for i in range(winreg.QueryInfoKey(tzkey)[0])] + return result + + def display(self): + return self._display + + def transitions(self, year): + """ + For a given year, get the DST on and off transition times, expressed + always on the standard time side. For zones with no transitions, this + function returns ``None``. + + :param year: + The year whose transitions you would like to query. + + :return: + Returns a :class:`tuple` of :class:`datetime.datetime` objects, + ``(dston, dstoff)`` for zones with an annual DST transition, or + ``None`` for fixed offset zones. + """ + + if not self.hasdst: + return None + + dston = picknthweekday(year, self._dstmonth, self._dstdayofweek, + self._dsthour, self._dstminute, + self._dstweeknumber) + + dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek, + self._stdhour, self._stdminute, + self._stdweeknumber) + + # Ambiguous dates default to the STD side + dstoff -= self._dst_base_offset + + return dston, dstoff + + def _get_hasdst(self): + return self._dstmonth != 0 + + @property + def _dst_base_offset(self): + return self._dst_base_offset_ + + +class tzwin(tzwinbase): + + def __init__(self, name): + self._name = name + + with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: + tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name) + with winreg.OpenKey(handle, tzkeyname) as tzkey: + keydict = valuestodict(tzkey) + + self._std_abbr = keydict["Std"] + self._dst_abbr = keydict["Dlt"] + + self._display = keydict["Display"] + + # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm + tup = struct.unpack("=3l16h", keydict["TZI"]) + stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1 + dstoffset = stdoffset-tup[2] # + DaylightBias * -1 + self._std_offset = datetime.timedelta(minutes=stdoffset) + self._dst_offset = datetime.timedelta(minutes=dstoffset) + + # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs + # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx + (self._stdmonth, + self._stddayofweek, # Sunday = 0 + self._stdweeknumber, # Last = 5 + self._stdhour, + self._stdminute) = tup[4:9] + + (self._dstmonth, + self._dstdayofweek, # Sunday = 0 + self._dstweeknumber, # Last = 5 + self._dsthour, + self._dstminute) = tup[12:17] + + self._dst_base_offset_ = self._dst_offset - self._std_offset + self.hasdst = self._get_hasdst() + + def __repr__(self): + return "tzwin(%s)" % repr(self._name) + + def __reduce__(self): + return (self.__class__, (self._name,)) + + +class tzwinlocal(tzwinbase): + def __init__(self): + with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: + with winreg.OpenKey(handle, TZLOCALKEYNAME) as 
tzlocalkey: + keydict = valuestodict(tzlocalkey) + + self._std_abbr = keydict["StandardName"] + self._dst_abbr = keydict["DaylightName"] + + try: + tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME, + sn=self._std_abbr) + with winreg.OpenKey(handle, tzkeyname) as tzkey: + _keydict = valuestodict(tzkey) + self._display = _keydict["Display"] + except OSError: + self._display = None + + stdoffset = -keydict["Bias"]-keydict["StandardBias"] + dstoffset = stdoffset-keydict["DaylightBias"] + + self._std_offset = datetime.timedelta(minutes=stdoffset) + self._dst_offset = datetime.timedelta(minutes=dstoffset) + + # For reasons unclear, in this particular key, the day of week has been + # moved to the END of the SYSTEMTIME structure. + tup = struct.unpack("=8h", keydict["StandardStart"]) + + (self._stdmonth, + self._stdweeknumber, # Last = 5 + self._stdhour, + self._stdminute) = tup[1:5] + + self._stddayofweek = tup[7] + + tup = struct.unpack("=8h", keydict["DaylightStart"]) + + (self._dstmonth, + self._dstweeknumber, # Last = 5 + self._dsthour, + self._dstminute) = tup[1:5] + + self._dstdayofweek = tup[7] + + self._dst_base_offset_ = self._dst_offset - self._std_offset + self.hasdst = self._get_hasdst() + + def __repr__(self): + return "tzwinlocal()" + + def __str__(self): + # str will return the standard name, not the daylight name. + return "tzwinlocal(%s)" % repr(self._std_abbr) + + def __reduce__(self): + return (self.__class__, ()) + + +def picknthweekday(year, month, dayofweek, hour, minute, whichweek): + """ dayofweek == 0 means Sunday, whichweek 5 means last instance """ + first = datetime.datetime(year, month, 1, hour, minute) + + # This will work if dayofweek is ISO weekday (1-7) or Microsoft-style (0-6), + # Because 7 % 7 = 0 + weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1) + wd = weekdayone + ((whichweek - 1) * ONEWEEK) + if (wd.month != month): + wd -= ONEWEEK + + return wd + + +def valuestodict(key): + """Convert a registry key's values to a dictionary.""" + dout = {} + size = winreg.QueryInfoKey(key)[1] + tz_res = None + + for i in range(size): + key_name, value, dtype = winreg.EnumValue(key, i) + if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN: + # If it's a DWORD (32-bit integer), it's stored as unsigned - convert + # that to a proper signed integer + if value & (1 << 31): + value = value - (1 << 32) + elif dtype == winreg.REG_SZ: + # If it's a reference to the tzres DLL, load the actual string + if value.startswith('@tzres'): + tz_res = tz_res or tzres() + value = tz_res.name_from_string(value) + + value = value.rstrip('\x00') # Remove trailing nulls + + dout[key_name] = value + + return dout diff --git a/thesisenv/lib/python3.6/site-packages/dateutil/tzwin.py b/thesisenv/lib/python3.6/site-packages/dateutil/tzwin.py new file mode 100644 index 0000000..cebc673 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/dateutil/tzwin.py @@ -0,0 +1,2 @@ +# tzwin has moved to dateutil.tz.win +from .tz.win import * diff --git a/thesisenv/lib/python3.6/site-packages/dateutil/utils.py b/thesisenv/lib/python3.6/site-packages/dateutil/utils.py new file mode 100644 index 0000000..ebcce6a --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/dateutil/utils.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +""" +This module offers general convenience and utility functions for dealing with +datetimes. + +.. 
versionadded:: 2.7.0 +""" +from __future__ import unicode_literals + +from datetime import datetime, time + + +def today(tzinfo=None): + """ + Returns a :py:class:`datetime` representing the current day at midnight + + :param tzinfo: + The time zone to attach (also used to determine the current day). + + :return: + A :py:class:`datetime.datetime` object representing the current day + at midnight. + """ + + dt = datetime.now(tzinfo) + return datetime.combine(dt.date(), time(0, tzinfo=tzinfo)) + + +def default_tzinfo(dt, tzinfo): + """ + Sets the the ``tzinfo`` parameter on naive datetimes only + + This is useful for example when you are provided a datetime that may have + either an implicit or explicit time zone, such as when parsing a time zone + string. + + .. doctest:: + + >>> from dateutil.tz import tzoffset + >>> from dateutil.parser import parse + >>> from dateutil.utils import default_tzinfo + >>> dflt_tz = tzoffset("EST", -18000) + >>> print(default_tzinfo(parse('2014-01-01 12:30 UTC'), dflt_tz)) + 2014-01-01 12:30:00+00:00 + >>> print(default_tzinfo(parse('2014-01-01 12:30'), dflt_tz)) + 2014-01-01 12:30:00-05:00 + + :param dt: + The datetime on which to replace the time zone + + :param tzinfo: + The :py:class:`datetime.tzinfo` subclass instance to assign to + ``dt`` if (and only if) it is naive. + + :return: + Returns an aware :py:class:`datetime.datetime`. + """ + if dt.tzinfo is not None: + return dt + else: + return dt.replace(tzinfo=tzinfo) + + +def within_delta(dt1, dt2, delta): + """ + Useful for comparing two datetimes that may a negilible difference + to be considered equal. + """ + delta = abs(delta) + difference = dt1 - dt2 + return -delta <= difference <= delta diff --git a/thesisenv/lib/python3.6/site-packages/dateutil/zoneinfo/__init__.py b/thesisenv/lib/python3.6/site-packages/dateutil/zoneinfo/__init__.py new file mode 100644 index 0000000..34f11ad --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/dateutil/zoneinfo/__init__.py @@ -0,0 +1,167 @@ +# -*- coding: utf-8 -*- +import warnings +import json + +from tarfile import TarFile +from pkgutil import get_data +from io import BytesIO + +from dateutil.tz import tzfile as _tzfile + +__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata"] + +ZONEFILENAME = "dateutil-zoneinfo.tar.gz" +METADATA_FN = 'METADATA' + + +class tzfile(_tzfile): + def __reduce__(self): + return (gettz, (self._filename,)) + + +def getzoneinfofile_stream(): + try: + return BytesIO(get_data(__name__, ZONEFILENAME)) + except IOError as e: # TODO switch to FileNotFoundError? + warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror)) + return None + + +class ZoneInfoFile(object): + def __init__(self, zonefile_stream=None): + if zonefile_stream is not None: + with TarFile.open(fileobj=zonefile_stream) as tf: + self.zones = {zf.name: tzfile(tf.extractfile(zf), filename=zf.name) + for zf in tf.getmembers() + if zf.isfile() and zf.name != METADATA_FN} + # deal with links: They'll point to their parent object. Less + # waste of memory + links = {zl.name: self.zones[zl.linkname] + for zl in tf.getmembers() if + zl.islnk() or zl.issym()} + self.zones.update(links) + try: + metadata_json = tf.extractfile(tf.getmember(METADATA_FN)) + metadata_str = metadata_json.read().decode('UTF-8') + self.metadata = json.loads(metadata_str) + except KeyError: + # no metadata in tar file + self.metadata = None + else: + self.zones = {} + self.metadata = None + + def get(self, name, default=None): + """ + Wrapper for :func:`ZoneInfoFile.zones.get`. 
This is a convenience method + for retrieving zones from the zone dictionary. + + :param name: + The name of the zone to retrieve. (Generally IANA zone names) + + :param default: + The value to return in the event of a missing key. + + .. versionadded:: 2.6.0 + + """ + return self.zones.get(name, default) + + +# The current API has gettz as a module function, although in fact it taps into +# a stateful class. So as a workaround for now, without changing the API, we +# will create a new "global" class instance the first time a user requests a +# timezone. Ugly, but adheres to the api. +# +# TODO: Remove after deprecation period. +_CLASS_ZONE_INSTANCE = [] + + +def get_zonefile_instance(new_instance=False): + """ + This is a convenience function which provides a :class:`ZoneInfoFile` + instance using the data provided by the ``dateutil`` package. By default, it + caches a single instance of the ZoneInfoFile object and returns that. + + :param new_instance: + If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated and + used as the cached instance for the next call. Otherwise, new instances + are created only as necessary. + + :return: + Returns a :class:`ZoneInfoFile` object. + + .. versionadded:: 2.6 + """ + if new_instance: + zif = None + else: + zif = getattr(get_zonefile_instance, '_cached_instance', None) + + if zif is None: + zif = ZoneInfoFile(getzoneinfofile_stream()) + + get_zonefile_instance._cached_instance = zif + + return zif + + +def gettz(name): + """ + This retrieves a time zone from the local zoneinfo tarball that is packaged + with dateutil. + + :param name: + An IANA-style time zone name, as found in the zoneinfo file. + + :return: + Returns a :class:`dateutil.tz.tzfile` time zone object. + + .. warning:: + It is generally inadvisable to use this function, and it is only + provided for API compatibility with earlier versions. This is *not* + equivalent to ``dateutil.tz.gettz()``, which selects an appropriate + time zone based on the inputs, favoring system zoneinfo. This is ONLY + for accessing the dateutil-specific zoneinfo (which may be out of + date compared to the system zoneinfo). + + .. deprecated:: 2.6 + If you need to use a specific zoneinfofile over the system zoneinfo, + instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call + :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead. + + Use :func:`get_zonefile_instance` to retrieve an instance of the + dateutil-provided zoneinfo. + """ + warnings.warn("zoneinfo.gettz() will be removed in future versions, " + "to use the dateutil-provided zoneinfo files, instantiate a " + "ZoneInfoFile object and use ZoneInfoFile.zones.get() " + "instead. See the documentation for details.", + DeprecationWarning) + + if len(_CLASS_ZONE_INSTANCE) == 0: + _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) + return _CLASS_ZONE_INSTANCE[0].zones.get(name) + + +def gettz_db_metadata(): + """ Get the zonefile metadata + + See `zonefile_metadata`_ + + :returns: + A dictionary with the database metadata + + .. deprecated:: 2.6 + See deprecation warning in :func:`zoneinfo.gettz`. To get metadata, + query the attribute ``zoneinfo.ZoneInfoFile.metadata``. + """ + warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future " + "versions, to use the dateutil-provided zoneinfo files, " + "ZoneInfoFile object and query the 'metadata' attribute " + "instead. 
See the documentation for details.", + DeprecationWarning) + + if len(_CLASS_ZONE_INSTANCE) == 0: + _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) + return _CLASS_ZONE_INSTANCE[0].metadata diff --git a/thesisenv/lib/python3.6/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz b/thesisenv/lib/python3.6/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..e86b54fe2884553b2b5e3c7a453b4046a919a800 GIT binary patch literal 139130 zcmX_{bx<5lfW-p@2oNB6a9JdHaCi3r0fGbw?(Vh$g1ZHW;O_43?(Xhxi|oyJS9O29 z>Y4X@ue)apC}wAuH0;BNjNhkPuu!94PR7no=2nazwl>D*HYT=APQM)9o&H4B$5vY) z{=iWSliQF*on<`3H5>3L_8Z=ZHfyqgZ{43nKX_xmSDx*HrL+D*@rCM^tZ$J_T3gt| zUcz@0@6*xchNo_}$Su}u?<3eGajmAM_H>l*Aj9**^JW<#h|6+{p*8vTqb+sw(=&wC z1O74ja$T-OLJs2Ffe;>! z(2Hw3<(f}7l%|2D9C@u6HAFTzI9xjTFjabk)x}Jw4k6UlTlMEv>Xov84n?38>IEN# z&(~K;C9lv8{OIU^XttY2GGGo1anaLF2ul?boaAi@Dc>yHFLtPB^0%_{2{0|MNM48N z#GN5%o8lNfA}UL0FZn6cO}!`cH2n%3*0I6G>tcufx`1!nrl7lD2yt3J9tbbL-z7ce z^Ai4(naBlwl8)2UcY;cj)@TS_m_ZBQVlZyUH;9PwkBEP7PW zMmD%8=>CDlJKiwXm8oqnq&GS{3xEGMQ~e$CR;*MTMLSuN$uoSqvazxD|5zjO)*E3sHQ5^*^#H1{biKJ2y!KB5K#;=q%W?tRDgUOXUUTg!= ziLXjBOE6vya}1vcBqF>{Hxv#@6F%WUA3MHUxQWh2_YRD^i$nipUFB5!%ra*~$=;2}8T-sgHp2S2Lx z;wCsTy~jV}{KB|>-0}HJ2=oS{0UKwD22=AiU#D9zzBCFZjY~XAe0j(b`r89N-2q>P zBW^4kx0dHk&6Kt$G)$N~c+ajbYn+d^;e6(^RaxNckLfx%kn>%Bkql!D+a)gwBk-i@ z(6bP}evbI5B2-9Z^|PTctt@^8@qXG^>BlfOw+*9X!w-uk6p6FFNJP!y3y&GyUo~5$ zjaJPy)2Dc82?eOG>iC35-w|A0)X(OoHOmx!*LZ+jO(UB3(m3dwc`b3851d9$@|)Kf zN}4libKEJtv)#SCHOgN5BpGUbFOje zWg4x=RyCGc%lCCzJ*xaO?+(X zjb3%==JKr@XaAC>A5QsY^49&6vMh=mQbI;UD?s#qj2oA0>ODI^&+zf8n;_Clw%^8B zYc5q<8z0<>h#4RPMe&hE2uPBE!}$^L-HBk$8Cb8~-cKhP?b;*dRt*jA#KjDJfo||C z;=7ju1b#Znc-MYp&@eG(fEg6^OOj+Doc}MrJ3rfXlF6?9*q~u*%mCLob%ebn$w4^( zJidD?KoF-JO?Rs%26srMiS&&~zOv$dPDT$OMUvcy2RXu3OT6eCMSf*f`kYK3K8h!~ z{}FV9Aies_= z)oXehFYPwAszI}dGGwq=A?^Lf+&c; ze-VA5>6lxkvfIUNU+%lF zNwl|eI7%gO;g>_1GHe(*i~JNa_*bGi#3gvE8W&tWX`TIrdBjWg~gT4P#+UFx0kvS5#_nE+9vd@{5eb? zshw$IPcCt7GM;WZFSAX*Je&e4CPRA#l-5T*Z4yWMpC2xm;E@IGCsH+H=^ArxS$~UR z?G3|6)8o1G_l!fHb}A&$i03&$YWrzTkwb|jg2^xpYJna}5ux0`&!=cvXk*{t3oPcq=Va6HQ4Yy{!f5XQ{{j7+>>oZV zCb>@mx`~P585(RgGO}T16~a%>2_ID@vrmY58XinDHd<$8b;VEa2p|0=xz7S>9Oc?( zL*ubeiE$qrTs1NBVPhS@Pd*4Ab>0++;rM#s+v^$68~hwT3MMmu2ReBNvV;MhzvOSp|6q~GW%BZSMMonSdc@sz_1+k_*c+2N#{eV5715iR_4Nhj(#e|+;UP9Za& zgsLb+*D1~-xE^M6K1MKD_?sFl6TW;WJIX9XWr~-uz7*4(MG)%RFcc8q@n^??x0sfY zoR^fGmy(>9mYkPSmiyS{IWH$UFHd$v?O~mp9$uO`-JvY}lW^*SW2lJp&EF>7V8?)B zHtR}xDGB=gET9ZUx*QpT*33Zh2HnWKOj^~s8x=Ahx4WYZmye3dH&)uU9Q*4XJvLyU z#e%rylsYuuwSR4p%V>SN-k)HW!DxH3onZ@+6H0)Fd=7XfWeF9ZsFG1Zk|C&&Gp3T6 z%ME3qN?)$_dw|pO(mwUw=!85AU%hSm+MN(H#VgjGkkAFun<)r-W6b~cnoW0P z6sm#77q`x>4Y_|K<2pFHCmFv$v~KjyH(G)*eQu&mr(dTQSW{Jy6?*9Tjncd))xpRtmD zb|5_i3yc8d&1BtH&}l$esQGesPlQR=s5YZNa^;F?m{u8ruJ4yyOR+i5{yZOOO( z@Ru_Dr&PA<2Gd>8yWE`4K`B^iDgT(L4AIJ&+<@~Yw9f@5H!>cuj zkg4~;$iRG;~0XN6WN@G07^(jyvyrRy%#5q5j0*i9bdhRgN%!-5$LPh(- z=_7kZ=dtVz=^eyP)eHthtV5ZV@(9L*Mw5oDPuPg1F8eSe1nQXg+#{^lFycS5{DTb$ ziKVeu4)wDaB@wavIC%@d)KB?JR7IdMg<3iN_H~L@w#tEv08&=iD@5BHd;a5th#4{H zf}qTIl7dJdES)GmKN*p=P9cf7<;3}2O41-~MJS?m6Qq-orBg}@Du1wbs{XWo_=mi8 zN=D2r2P+7OM8znPbrYqFW1k#~cQ?)qiPwwJV0Q&8(t$F6U~e7n%1*qTHct;sOj<*4 zZ057BkU$0*N4J?9Ghi^7xkGOs`XJG}W6isUyiA_h|f? 
zD)3y*YdRqAeE!GHc|GVPU2?v^_o{hfp}M^OkH)Mwwt_C43*?m*K%;Q!pH#It*q?9jMMuqWLmF&x8Z zOD{E{q^@XnTArpOFvMM_|L1Cqa4K*opuE4~Dy#p<$G6{=*phIAt3p5RipeUi4ARN1 z(IqCs{9RY3LqnPeyw&cowP?Jztx*s!M|z>g1Wqc}akouyxZ8sdb-Ya-uJetHKY7(4 z;Nr#aM;DoiNEKxMETm3qvr30=zUp#hwc0*4{pCg3x@fQzS!GHCS+0*7-ps+TMV-Hz zsIRlyW9XmfvZJ8uoyr{Zah_bp5b>17l;mR-HNxaFbT5U6V(Sev{-+aFdAySCjfyb+z_7+1got#ko&@{<(c+ zt?u*4i${80!(DUO_{rTmZ~j7Nt?GmPVreQx;)!ls)bRS1rEz9|s?kOyn^A|XqY?Om zatYe?eo1GGam|ZW6sl8b*`!M3)sV%<#@u5D=7#A8Mi$63zFxos&|xXFE_ z)#IBHe3QhOqleuj+>`5bQiVbsxGoC+>knvAl;#4Q_oz*t*6Ucblcw&L|9jGB{kbtZqQ7>Gx(Ih=pBq&n@B zmlTPzPAryy+)>E*jDyOQwnywMiu-Ir4&-pR?>b2!^|tE5CqGJ?MT%6&B1Ukl4yidl z>O?`wed0N6Ky~c~y55mF#b!75u&_ZY&^$-F#>JzNG3sGBzkrjVBjNLwHe$Y=EXbUG zr{wVMV%R&lyz6E26X_^A3kpG5aON|{_8lokN2zY4#3xi8Oi{8Bau5myE`9VDgBVyO z5H4*r_kRbJxHQqy*;jI+A^prDR0JKANoFsxX4zeG-AI&IMn*8kMi|CM zM#{!Uzl@FSjg5Sbjbh+qCt0f(S*tf$s}EVLFIlS}vn6F=M+YfJ2Sq^pzA#5hrj{aFfTioYj%3^rc)j`xa*hrxd)?1&c zT=4L2w#XkZ*RhE<+h~P5JyoNBKjaI;TqtzGuBXBx00GpkHqv)IZk>**?_g9u`2rLc zpzi^S3{V`P&H&VPfO_YDbrDboEGoejw$*2z&94rVB8C~T=ma;|UY~j1u{eB!RUw1@ zC0LAVV*x1B*&lc;d=TY)|m!7-jMQLVU`cY`ppu34$- zJ2A0zzt12CN5Q+-Ac7O@MsQt~q6ojGVY)w0G%PO& z(ODAhG@NJ^pQn?}A=-3Tcw(?CBL+^enFsq8pXZXz;nLIyu5*z3cTB`PP$~==KCC1s ze7M!e&r3vKt3~vU;yMQ2+?jE9pndU*oez-r4(LVkL&M>E;KfFkJK8<5<~Qw zOyu5#xG9)ewNE;H*4K5Fbf5!VG|-9h7on51ek%Nm5yw&=R~hS;B&g>o&+{R&_jtB5 zWHq8$&~{pW&CRxrUC1J=)@}8Nw$?VS`$E(F^?9E0tpJMOZkz4nu#0uknBbA2`NoQE z;j`Yl5#QuR48-~57}6y;vL{Ajx!2=7G9raM9v!v+_Fc$BP(~4bgE>!6yCfZY?|s(Y zj=!9DU?QC;s0;e}W*6hM*Vd3%qW2J;mLny*FwYrV{&2{&=2lXDE9M_KJ!X$uOFE(v z8#?Rr4_9+m)00)oW$x?5!a;1Hdpp&G8NX}AXoOMOsq0Oh1LFGRFj!Ezq|%*A6WRZ? z*QpwlIXdFAA}&SjshDZeah2G38vj#f0*xWQ?K(gF^`7g{nCh-W*h7a0G-AV!_7HM< zVdNF}3mn(Ptw5glu3Iij z_PD*M37vXF1m>EztK^Y^XtF9;vMnkc;chuuGWETQ_tg}@samJJi3-A~Jhij1ffTau zpUCg9Aw;zBnA|ETls37ag;h|2fk=8$S|GYI#*ge52uGw_QkDXxJa;Wi!>r>4RRiAxF@4W-7UnG*kH zj1@8?u0W|sPZh&WUg0+#(t89iL#aryTXh`LE2F~@ILA!mJ1q0h0kaSqExh+9X9HQP z{MPi?9sV^9&e%jagGaRodj3*jcwYg1cz8;95^Cpp(&O9*GvT8rMTIW}EV1U^4d>;p zIjg!7F%)|{w+7W*54@=)EFu-N`utYsU=S^vFGx^d14)-*V1nitrWQ0u80Cwd+mdGl zz{Eq|U}w{FosPyhY0^NaV;xmxj-q!arnR5g*fStO3^7k)f`SWtgK#c!K4xUEn}>Xp z=uz!(-Y|>lQ32`O%&;wDdI+3B?VqgBylEGS(5v)AI*5Q{kwqd%tS}_IlIzyk_ckC; z=q>_U@BZbC0uzLQHm3ge4Dl3Vf%fnnK)}$XC$J-%uFwEcQcI(+pslLUSdxx%CUo81 zT4Ywtdn?y#I$J-e{qU}DdAum#?sX3Q?LjI&!N#5XxM6K0_8j{pe}ggl%UYX&XnW$i zLNhqG^Y*}V zO3YshEZ`F>Vt=Ni)QUzvd2HomN=vBF97}MGCx$X|7(K0ps9Y5>4^l}=$WR%A_70kZ zr1QG9hV$SAa#=~v9hNM*w3__N9iCQgw4P4tZjlR6EN~w1Ch5IBluKHV{dA4rY!qZl zDK)%(^3=<2KuwU&pDL&wwtFdM>e6+x-b^iX^HdE%8;2z2SFr;{N_We6-Fdv)c*oA@ zg<5S*%*S^NPP6<21Bdxs-W(N4GSv^4($noT^|DfINM0EHOBdD^NQshuFf?ITV`f$e?#h6ZosOWay&ROjC7jmkIXELk~9Aoaeng3Q6c*J|25|7h=r{SVWPf2pip*-Uw3U^}`(Z=s%x*Cqr#7kXSy}z@ zllz1XSV1+(G5BMHr6%udp88MsSNJGMP5$0QBu9P_{3u}@f*-}KSZZptZ|Lx)ic}i; zw+~9uyXg3LzhY62A_i*2Y)Y+A#ctRgat+FGlkuEg1 z#EFg3)@!Cic!%G&n)YEi$7?=5Xg&ch(RvMRcBClX2TCgO*FUqLq(g&&^(ms`F@UM1 zW|gJMm8HlN#EApqG0%f$F#%KFNQ|v=P}yJ)FlVs<3U(#BT{>HXilVx>}Y9Q+d&U4zqNn&%BdmJstkWs zd~x@1@4IqWh=;w*z_0zR^g|j{Po`a(s=3}bTao~ z9k9RdiY<6ulr#9K8ktRtLfi+*<=u7Fb;7o-?o_6r?N_e8rW|lw>n55I2zQC+T(FN9 z3O?ZZ`Y!=mdt zvj1S~ow|(6u9vi}cV*8wspY^w-!tR&=@^?XNiWmPtgGLTkgxwo&1S_&_qP&WL$Mlu zP29Rsq3MH`)NVmUWqxWgGeElnE2k<0x;W%Z}p8aumaMrKFIPQ{`N} zv(=HEQ#E#dzyf4+s%aQUJJBnQ=qJgN_0I`K7pG^Fy0Yhbm`IY$R^yYVYYFWNs-ejB zih)H)>uq4p#pKsiYhgc7wl}eM$0SqaTZV*H%(;ow_oX?=5QB(Kw5O-uB9)8wtaG>!`1%>{T*H4(PH@^-XbwE$X)RKJY&_{Cfbuu} ztiH?8jumX45OdT-VbTOI)8onPr*P_w9GK#+@#?PY`rMxGj7+TGGQ58G_X%YYVsRWb zbzU3etWA2KC{Oeog^J`|2#Q@!FJhze_WRW@SFkqmjWArYFTV+?B0D26l{O_q=mCre z;Ew1TaBl_DD?ekMAiJyCN_vfTT8HG6TuaK=LCn5dxR2mp>uZIDjtz 
z>6{?GGW9Zg&JG*Op{RCXi4D611yd>~SzX)Ljc1&*mFpo}=1KhlV)b+L{m&y?jnGE*eaxUld$rwA}s-Rk22- zHr}YZWp&1W_i01uWl=R#Mr5s~-yB$6rt}S@ zWpGETj8y{l>rEI<%3jN*kp6x#t&gN1_#O(Vo5(z|{fk?BV8ksW5ntQfDbRqL}=>r3g zxw>LXLY&Df%1kLZFn94yOdzufihg)=nYTH%~oXwJFe7>GonDf4u+bv#|0?DIHXQ;FOO!8+4r35k8Uge_K* z<{E0)fuEZw^WD8@lS3WUQ)bnWc1@WOjEBAO-m|h}i<-B4H653vzp#wpYTLKEhR~KB zG0d6crxbIf-WD&nzQQtoaVT8wc8tYc&Guie{!xpo(j$f1B%q8uqAH8Zhe=fjIsd!N zOC@em(~?_r#z&&kV$79(7Kbb|ZyuCROtD%U>CeT>$3b5h(2%@$TBw;;5Y z(xSM6;vNs8gzX%}q8>u4SWK;YHL3Va5S2$iFV=-(bZlsy3pH_AwB*mS1EN>^Xm%4@ zmV?Cr?>c+>a-IPtt9%8wv)`)2W+69JTbatfS5r9w0X#EFIdblesblG95ksp>`KD7# z=al=U2Im$FmqI+H)%)!Ht#-y(1O|g@W=Z54Sy_QZ0fB_kfkX~S1TKSAW{1B_m{Enu zP(>IhbAElu%?d8ro3iVjyEc;!4Pg$6O?p%j#=+8SmND2Kfmf6FGA9cOVQ%@(awQ>e zwoQ)0PfyK$H5q`IzlDO{s%s@T-19@;%YBTNx;+?4fj_se7mp}#js5F4^I-ovQZ_bw0^u^MBnxi0jPM>?}B2z8WDwSH^Ag=)W!& zNzx$_x;%CO;yF2h)wX;I|J$Xec)<047|%@um>C%^r2n#0+}Le1s?Px>pPm0w^uJ3$ zMVG$~1U_D*npRNwj96tIpf@%i_IY>pO&hwvZ=f$;fN{071C>{rg?hihzBs<3skSoc?xO?x!!=U8Mcsgj; zH22_MH&2_s$JtV%Y=;uP!Z;x&QkG_w?()|!Idk3KjsU)j)x#|noz`t_s%17^Qsks@ zy=)e>aK?~s?VtXP4(a748GJGUq%)2%SES_T$iIPjGEiP=VF$NjCJ^meR)c0@ zHt>#{W559UaCK>AP53!p7X)QJ$r&QYa^t~7ptabN0Wg|m(IWj^^8m5GK+w9nSgb_ zC@eM-8@#brDK9og0UM%0;*%Q1=2Wa5NX`PLiUpPd_K&3<5J5<>&m1;F`@5;T{}rr% zF)>i; zkV9!_F?dZ``!>b53pLae!Hw4+0y<&$9ahBFXFq4=h>MFx5r~sF|DBkS*Pfk zwd3KV1@B#yr?6qX`@~r04CmoQ^|e@L+ZATLWS{R$yFbja1)H^*VtLoTG~4ItSRElt z__#Lfcomn&f9B9(!Y8cYUudh*p<~dYCH_p7cy*~UtbUT4NdH5fHgiRZ8N9YoXxALB zYY#ThnJ#d>9WJpURNE(ANa*F2ojYhaoNLBbE=K34Ubt*2p0%peELXG0+*bE2KfS-K z-r6c2A}P2hsAIm3JF)s{JS*^3w}R_IA?J-C^_<7&&o7UfrgNN|z-@unz;mqFe~&K1 z139Z7R>&r!ODTnv@+hbFT8EcBv~iI8w$v)apM9 z{9;e7{cgMNQ-Xp06uRC$^VLuLQ5=F zXCtAq4?&2#Bw-|U0!n3Zo(c|M+8V~=pT&LhZ`1i?>+1xJ#pT_#FEp6v4oTTmq8O?> zgOQKQrG91@^)_+grEOttk>lSnH2!XH|7+z2Z7>C4P;# zIBb}B#MJ8B**(4TI!;N8Az^4(V)j$N42l*nbNm}6ajS5o!~5#0g6o70n3#CwPy0Bk zgS-CoKQ9cXFqMz>TJ^J|8nEY^e=1>7@K1^`C+M}2pK;T0(>LMCHxI`oNm`0L!e0wO zG?_Q_ko=}zbY8EUv`#aCgoqX z0wZoxzDTIAIYBN#QvQ^zo4z`_{6rxm|JRZa`KFD?=%W^X<^*_u5KYv&*ilt*ABUT&&1h9Yr4G{PS1hRpEjBjrKpt-cNZ*DO# zX!_=s1LiNFH4L=!By7Yi!d3&Z@qiW+(9#83?Lfd62q1$R$f*!w2|xwp5(u$$z+j9J z%L|wRk~VA_mk9|3O=E6d8tPU+XL->2?I2+#!Dq&5Y<@3!;$@z?exoqh^acE~{$>k2 zVJU~+PFz8kj-WS2{GUFs>Ef9}g`kiE4n-R+s?6f{)qCq{pQjZ^Rjj3clJ4rj{#2$VXmH8mG`nedIM~&3iWzqO^kF+}O8E~s3xos} zmBM=4gJ%(liK6o{R%JN(eo@fbQ!OKSdz$Gd!q=hVB!d|23{HyIfR`0b9cEn9|>x>D3)?va3q>w=80lv-siTzuf5r=wM0wItZ?ALSlCZ9*&S}i=P7qy zDh^om4C*mnKSJ!a>Uc3=)_D`DQo?(! 
zthBj>lLH^qxLDr_RjYZF1e0S_y@IL}pfE5>y%V~#FswxjF{kml;a$7YgjY=|rSGGTpJM%<$MVW52 zN0Ya*GACS$+;qu;q}~CZiEre9$!O$`P%SNfRo>BKyypg^DYQm_0}s~2(qj{Ihw<$j ziN^jyn#5`R@ec6Orz_K!GcwOxRNq_1Ea6)P34vz`wb`0J2hq3r>o0(`x+$4hm@*%@c#*CM>E*%?0v0mFcYoRms=g2^cEEk$fkmefdRhTO`{ zOMF(Jmhb7}WaozdS!k~p+7SU2gf{WrdGw#Cj6$38Hxs9uD_l-_Z;G5Bs)MF8tc4aI z+DXnTnTd@zxQ6x@q1R}4!r!a!@5q8)?V=8>f;x&~J$fC}J!&1~23UC3+)uX&2CnMb zr$Zt_cXD^sj%Jqymbt#Y0}r1hwsxnDqf$L8gkk40zPMbTIhUOx^}j$8{}CWHn(#kSHCP*vUp+@>M)z0GNNm%9-QN| zk*7YvLV~7xnCMb5iH~+6X|GON!NIw4jeh3Sw^9G}Q`-u4P*oUZF;$KOx~PG|Z$B{A z>7!}gm|ajdB`RiZRa(!H{OGsrbjlxO+}N=^+B`VlI(DM+&CIh-V(piT-5zN=BZ_OR zmhSPz$EIDHfQ*)+z%x2f-7@dW5+sLDCDj~0-Fky5uP&8U&N|ahuIAqSTzt*aE@^ql zH4b+@KXUo0iAO83Sz?w+NPe<8;o8pSnOF40rzW~)V41A!U?DNRZGSO$-SUC=IouMs zt+h;;nQsZiX0eG{uCNy@0$&($R2wv7&po1Y@amB;=BxM-IPRBnZi?V>+>fwZZ_3p0 z9=@X2b**%#i|MB2_c7kPqD6$x%nli+_bj(j^hvEbqg4or%YG2tB2unxi~j+QOmXXz zlcn>}QCL=0A#gSh1Zy4l$>tG)Miy-HSk&!LSF<{RvEqis6gpP3f&wx=~c{|RL{8CvLsi;0q>i8xu*_61k6hUA~BT*_}n#v@qXx?I#kj%Y!^ zAn*l@_ke*D?K6$yWFf=$O@?Ysm@1(bF5&w$I$((X&IKLL#{PXLqcmCcsyvEj{8Oah zZ!LVf)-)b~4JV5@dPN=Q37jkjLIV~{c@Af_0vvyx0hwDwAVUUZf+mZo7-3^cvQ#}~ zN*j%T`f)9)N$!5gRVu`Zl?48O;|)+eTls*5d4dCMM6>X7oQ;4w3-zmL!C&@|t>nap zMvP)Xe{VlZrv#!|({2U-SOKnxmI%X=h__A;MXFaT2)G7fpz03n^E^vcZuvihl(#Md zo>3EV3Rqhls7J?eU=V9fOOTm^Kqnv|9Rs8lvsLA^LVz?CU?c;E4PY$)XV3sf7GOmF zmk32;JfdYlFsI|K%`Fk0R(CR!(eur%0`T~MI1I=$^476UWykz*1=c&0hzP&cfaqcC zYd~r5d|elVj6^GPTB#r`-p=|zcoJBL*6qkx9M ztk8x^4Rnmj9OghVHwnFsh@er7lWXw@L+zH7Z7N~g0)N?6ZEZZYA*NU=T9a-4tyao> zX`89+P5#}CU+aASpR{!h0*}fzp3@PRtTIk7Yn!&mr&`X7eI;`~bHSh`9xt zp8a`FW&)IDhC76`>Br;fs1@5L=Q_J5wM;5e@k|;he_USgIUF0jD*|1h*||*v$FsG- zo1^kacl+pBZ8TAw5BOVu=ebPWL(5L8YlGF-XukNe_90+kLoxj0jT_({`oxn5L;Cw(pgiqmm!DmUq}u3#=w zgBFgH{(ifc9u9tq6O&qHv{IDcNaDP9n~Bd&SD2QxexfkGt$v(jakNbxXovVLbC)R& zKQ`uMVNpO**dV0t(la-LGoCgC+a@2K*y!sMlSgtG%e>mKY62ScEC`0R4Q~!gSGHDN zSe_n}4ws@x+FveNUh#66OZ@khKOoA3FzWCKA`06kgT>DRB&nh$sV%;;^D6siVT2NV zj8K`?H_PpYnKpx;4i3P|RYswa2H`12OERaCMd)*j5cJvkvBz$`H)r2joD2>G0;oWM z5D0Jq0XuGCg1vD5X9JxAnL9o zj8p(C1DGyN&j1&|C}>fi5h(w%u+9L$8UWltk2sBl1zj;}5wSq(HuK?X2eyDNXuMDqjEU*o%z6QKifM+*@8wXEs!Rzbo?Mk;`hkdAV z33NoO2YK8r;%$PyKBWKjC?SvMS$F!N-*s}I*eNDLky-X;;rS$tpvgRplX`Gv`EO!9 zmy2pWDScdPj(T*}&W+aaxEDu6`bY|4i{h!ei&Dw2)1_9GB|h%VUzq|wmlk-}7NOe? zJHqJ|;T1W$TxbaAs%Y@&Y-owvIYdQEL);Dr)_Xsc)p%T-avfge@ppRr&xeNu+J}qa zLE*OQ@#;j(P8{vtcl8d1#^^trF59S&t9$FG-3S@v;?Y&4F~3&Xzjb@> zbw|}igz2TLFcA~j>b!kbYpPy1(eZZo-A5oz< zp>L3tBxoqvRkPVJGhcOgQ*83=Tx(1`H(1>uD&?xLu^ks;y;=}vJ^YlL{Z)nrcO`UcsIT6@ z$&Y0(Mv=81JqHUlH!h;&ViIW}f&VBM?x=T%j^F9pm;GI(cmM)SGy*Jg2K(o4m$@*$ zel)1a*!<@D|2Ad(;oy}B$SOVAr2*pW2M}DVXGJ)KmG+r`B3p3?XvmrDZok=o161UF zuJ5028dPfK$J59M{=#pMxxN9wITN;^`R}Yt8UK#~N(5$Kyr%WVfFco4Jp3;b1VjXZ z)!rF+x)b~nCJ6o^D&~)&DrzolfP{)FTQ%lIw|=wP%Xz~tTs8Lc(SFtKpFyfSLFuAN z1_DEUa)C&g^}hM$e^%?_cm%@NddD1*kGa0#)@x>84f(hTCJp$w@HT}^^joG8d8Xbp z*F}T+`#xq%-SAC8`Io#7+al!9xXk}LKDw{kzrYl7fUeinJ;`sJ$&KTdmd9NhQk;_1!(*D4}c6SsAy$6vJ;IA|C2SerH~7srG? 
ze%bO-)wF(N*+qQN<`_t|O$@c`h+8mecVruUJx|`>_bh-gHFXpy4f~gk{pHvBCF9QC z0Kq(}v!HG2KlmBT$D|Z0ci|{^T`Q0p%S9RG+5QgC@lDHg?MBJUiYbYCtZg%DWvtv( z$cfp>tf=X--u%4J6h1}nBXO%ZWG?v4bAXW>^<3}*Goa}AM{(I7B@2gBfFN0S-;qjLWs|aXQ?x* zg(s;DuUy^74DzS>ScrK<2}=#|fm=kRt4=N?>x^>FO6 za{?8fEg`8aVmLbbUll>#i|>D}NlO_vs`NCkMEH#JScauD3{=$HesQUnSch|5l|^eO#!rFNtR(O_#i;d@Yf-D%F|eUVQQOxg-qoDqLgT zn-s&_k$zo>&-75mV-8uL9TylLVm|*|Ft7Wmt*F5Tf1^K$pa7BT%Z78QwHIkQp=m1$ zp(!D=#<<$$x9Ln2uIURLm2siZ`8wP#noVxiaSlg2ZNqyUt`YPXWh&)@q=wuF5vt`9 zfA9|#>O$2gQ;W64Z}W3YvbpvaX*^VmEX8dtMy2}w&r-kRIj6=BZ>uRQkql z+*Zdzy(IGf@&BMry8mYHT(62;#$=0KpBhgwb3rH(C-0mIt(-8gSZ`J?wc*HE$K}L$glTHTr$L3;_1U=X43g za#nD3>Bq>#62T4i@_A~Va*{3Dg~Ai&1*QV0{3B#(AMG~SMsX7EpJJ-O-Ok;S1R@wF56@c z^w3hWhC+8mbTkjLhS3%=^rE*2B8R;zfP-PNwQ8YexZ|}zMfYew7&OO=0d$VZ}rR$sqI<8wP^rC zdYggxb5B>&zUg6E&LIF_nhCi?kdJx@yAT_e(>RWypJQZcG1 z_k`t$?G6Tg`);vT8F&D<1^GuMaM!gGI3%WPQ98hQPKi$ZO+tg_d8mh%J$5H&WU!}= zZUm{zbr++Lif5`OKI4#|kp`zNk7lVOTIwfHag?GiL)21@4C(W^DWnm{@%AbQ_XtZj z^&j+#E^F!49Mp{^weedu8>HTo- zx%ZssdG47z!@w}JJA?NztoTWCmVJuTb{_>44P$Yb6d^zU(A)9xWz(WJa6a;A%5(-sQl)A?M$Xc9 zCf!(mBb^RvC}%;-7gceQXeh@<7NmN@!kGU=)dlIqE(*t1P0L@xFY`U>@(dBvWNxW` z$j#ed*fhVM5skAr<$qHBffDW(Ma`V8_3Zi6EA)SUFO}65%Dj>NnU^6i^#Y-eD%`t2 z8hL+_6HT+Mf4vqWp>j6TI+mo(*~G&-APNImfzMJxzg~efRY8I(H|A6KC}mqCt&hl> z{!+9#?}-4a450MG0g4r%tO3deJKR0Wgt=Jj;XlZtrK3ZXRXntsrBrmjv2^i9eVH9x|AM>!5%8Y6Mg=e@Xgx@WWWT4u+J5?r7)kI2@->?2GOqD-o9y^HQ8l@XH(5msZ>Q!-MC^iI%SuHeb^X=*q=G zVqRL3368&Ut+Lwo9vS3LOWfk18!(lN>xy|9Mka{MxjtPUeiAk3@x$11pKWualu7@PTQRw_IX4 zh@jEIp8ftqK&E%#MWRu9oe_y0cyusoxf_X?-QYv|m zsGI{AmChGe!v?X}6A;9Ik6OhqAld_v)e#7aC@sXr{REGAXIIc89HreqS0AyLqJJ*cw$O05dXdj3k_R>*k4WT<=xE(&eW7Fstm z8t8ik8TISz@`u1e@30?GPLmH8eors0z5l3;t^FtZX@Vg8GoPgMR*7AQH--OXUen|* zH8k3?a?m8$nOYbJdDbAx)6;Y?Cake6=HHtx1k^Cea66B=YmBmLKNt{Q>MOWK`Kp=C z!mOs)>j(Sa=V^TyVX1>dh?iWW90Y$sgFd>j50pH}ay-=&x!8PTWNmj+X1J19`Qzz8 zj3!|nKjQX0=siOIEE`T;&ySe!om<5ugG7IN%)8k&lwV%szM;H@2z4&!I&HMLg*z_r zQ{AkMbb5&RuutyHyCWVOuc!|VIEj{R+dfTNcX=PwTu!&rPVaOOX6sp-rqwV)G;)gU z?H5Y(JWS7~Oj^0y+wZ0O?3TyIFK4M)Nfw;$&b-!riP+XyveY^*PxPkOghR&t@aJs1+wN;C3asv@A@zSN)S$0~+h+@%SCgXxNDq<;SD~Ab_M+7NRpr-YdWr!Y zzm3YZ{VMC-TlN~>^k`O=j5Yox=hR0W@k(@gAUaroS=G=~%#xiB{$$5=O?vvvVSdcqcwOLMS#-Vl zSo-{%0rz=rr`6O(>Oa2OZ~H^uj{lH|>lfNZpY$GhctU=ePA#!zY7DtF8kjMGxCz#r z4P`$e;55IEq>$J|XA^fpP0jO09#Jz6*T^DdTvC@K**(!jnMNu`D}JhgV@UHYgpr1q#zewiVmUZD_$tJ?>bb-;<}=@E zESiI-0QjsJjXE;;=W@tw@D&OmV@MGkV1PIQ5MWZkQP4^tPpAP3j*?0e_q~JUDD*j? zXNW@IA!#p>88#d2jt*FOfI$ryl7JBb7%ymfX+)&};4=VV0svt<#sMJ!G1GwSJO|>0 zD=ZLv0RrrRWdc}T!NnF5)7<(&({E@F$P(V-2uZdw!h;AJZE6*DC%*Bt^ajh%&)_wc7gW!1>8GZTJCj4o z0&(7?wASkOuw><5{$x5}XRrI0zFwmiEZEIHB;iS*u z#G?_nZhnoc&(zt;$N`?A=s9tot2;ADe6nvT^n{ZsIp=*;w}0YVR02a;R3Z314BK5GTi+hXt;G_@$P(Igf7%SaKBk@LYjav!=@gh-KmF!;b3;ezd9)&KDE#)d-HE{F zSEyEa*8JabYNNh0%0|!gEXR`zIF4V_Fw2dT*~wXT@ zetCo3H=SG-p7u4^^>=W zGT`dA)*u1|Jur*Qgt)ewY?(xU_4#zem5Z2sB&Vicp2(aY7e!Cj%2i|NPmT zm$_7)|EYCm^lccofzdJNJ*7umtjE=={!u#Jb+zR{Q)8TRV)Yhmwq7;eXD#!nt84sI zZq;p9-7ff+m&3Q7v6WgrBe5stEm-DTvAy7l2h10ZmtP{-_Y;5A!10Xh?BcFi(>_M! 
zY{o^SaN%jxFmV(btNDMnVNgYmO#55)Q-$W7FgNADvqf(ff!-gLNJx6UQnu_}wl{XX z!YZ+;%gaa6U9JT0XAtlJydJ$+A1inOo;Y&hZ`7#c733Ms0p9AM}a7iqe2vfQ0du2_2@;V$THqy28Oz0nkF^?y8`wW%QVp)SOeCd=qupg zVuLZT24Gjp$%%M|vSfk=GdV)N5HLe3=fKi zRQX9vizP=WB7&KILT?SwLyE%--}xJROHRvZFm{Gd7*J?1G6u+Gq`xJwdOxk09I40( z0v(_E>BmsT75PJobC~hI1tKd-Yfv##K21t3eD}^b>J7Ku>*1fpmCS_S8bRRxzW^Hu z)Pul$;X4u8V}tI@=r=Agi}k@!5}<2FW5+ez=k$I8dHy{pCO)Ki2tYZC-o0Cnc>}H# z0e6a-5JKelqE;6KePiGJ$ooxq#0TPQ02qiujU`qWhoXilR+o#?1o|~a??m+eCSFro zZJvk5S6%)$anYcRFDIN6yOTBGH_6qE^=7WPBh#Y|Yg14nf*M~~VEA$ghIk_0CH@A1 zcWu`75`o~)LHz#u2UFwRqAzc(#5~9g!KrjZC64Iq6ud@saUx(TqA_)*ApwT{4n!k- zPEGAs=4Nywef+14;>%EBohb3dY=_UhyiE*jOC|MQ_EaURuIr2^?03blhVHN(8@Ca= z_O2lj4D3(Em(q@XFG@=$lUPf8jc|>k+CT6U=hRPWe(bjYCUR$Pe?HmtZg6RpP-So_ zsw;6cxAF@5`NNH&)vbr3+<4<~c1*UT@7;&(kB4y35E=t4@>xbgeTuMMg?WUqjmD8q9h3kJGm0eHzqP33L($kE+`jpGBO})ih9!EpUbK&<)Sq&^NAiF(7 z>y7%+YFmAbX37rHW(6-85wBQMgZ3pT7Hzd)`x44v9n?no9_Zd2{j#gs!PAt&9K88 zdWPUmcug|P%M9{V2OlFF)02v{kl9d@35HPCS!^N$uB0!z6K~nVIt7zO1Pb3BCNxl# z3XJ5;7AWIpYVUX-)~4~LZ2_-j_2V(8Id%jxjgY+);6)vs( zD=J#~_Z+k=E9|cHn?zM8=U9T^nE^q9b6_sdfPY6HEBiP14vl^sfY&W5a9r?SeEt{l zGE(5{%Sdlv&A|Qv6Nm(3V9j^>aWXFh*yLZ;SdS&9*BiLe@19x$wSPZA@&P2X13}VB zJeBVG0QE@X*Q3G2uV)gQBCZUZqM(S*!NksPzxgMLbnN|p^T>&G9iMNjFki`&f4!~3 zN${RV-&QDoKA5P$^iw;lk1>%B3Q*6s<%%i37k%@j;eQ2xlJtc}{uQNUoQz@U%g8HK zK>|B0SNdsUz+(qIvjn=t{`_23p)g+|JDm4|2RMfZ#e#!53SKg}C7Ys0cnNPYuHC0; zx7CYf2MZM96QmOvX(J37JSWT@rEq2MKfjE6izP^OK;+6i{ThJu0H_|{NTG?;pe?xA zY%oQkFSyupFhgM^fhRGS*5;Lp^tepxm+Z&}96`bZ3RlKyf$ew2ZUAr;T03sL%s|(q!rt=-V+S zxD(EYG!QmR|NX{Gq#zV!9N9pm;2Q;?!~pslKy`r@1ZY_Tt#RZZD}9LXi>-Wx8xEUl z6M-s+jl5c?+h+$SCQa35{gv4zRzV>MJaY0hdJG7RUJ5=5oECnQxVZ1X z>AtG;%ixIex1{7Ntn)Mm? z*WqpQqz2%ZfrNLg74?p6(AT@z9qOMhi7U*7T{6b#1RGO&ug0T0r*XH1MgwTh#hFNk zhr(T53$l&(AtT|B;US@y^c#$wSI3ubx!W);U=%*!W3t9FAAZ(5K7oV3shgZ7R1tg5 zLx`VuXJYRsC!1}29uZoPHW!zEoA(=~*+&SQy@#%oLounVumaaHm zl}cl}Sv!U$H!M!jy)O|CU zUUKJq1*eO9lcIwvJ*$yPLb-ZHHP)ijYzD` zKqXoHAMegXjJF)B&z%Y}oM4-1AH zE*X_U2d0@V9{o1fIil{?La?f{`+zP7c%T_iz#128k+CC>rArd8{{oBK&+NCigXYby zMxKR!vq{JdJS7E2d3LNtTdb`EA8K^_bvMq&4}Yi+3F-6ak33+vHmd_#|emWs?{?1e|=eEg9-s#R86>iR3&t~PO1JBO7~ zw1(b{c%E2UXKSFQa>vE7CLJz3ZR31@ET3oES?cV4K(QVdUPM+Fy??h8;v$3>bxhWB z`&jnDPkbl*FY7SQ(z@Cmtw^-1%uS4X?qD|MPSnqyTJr7eM8}0>{x|aSEp+Xn7xLau zcM|?$4b%N0OjdpqA$LdbAL^>~4|ONyFXk|NWU^T=EAzl-J@s5nNWL+lspK7Fd#I}_ zAmsp(=pW+bAOJuQ*Hfp$%F8$Y`EuE}Wj7nRKp(MjmC=F##)a&nbZMj43)z+F(pd}( z+4bqt6if@*ZRyf}GP!wfq;9qbu4651pcFDU+qRv|OG7Yuqs*`~AeWiro_wZov!x#9 z-4ouOG}Yva@Eq-Zxg6#?cE{Ns$|d3b^iS}taxd~9;?L;mT5UKhnV2M>)$sWg=>74< zBuPUAp)Ias`oLLTZ17lPKxkI+O(hp;+e80Kdq1_n*qyx};kAtXi`7dwS`Po}IQE$} zT?_VdC8O)tjbA@$q{9z*?hiCK`^CVC^e{o*l~|y+IR5Tmm>@KAaXdTbt4apa?x0zFKM!s{IK&pbm z>!-^vkQ!e=<^4v>SE0fQ_qWk2yl!o4tuw8a+Qt_AUD zFd5k2#esWyKhr6(T#X^rwC{hG1+I{GF4r?^IPU|CKDs))1dui94!P|#^XUbdE`2nr zfncrkbm6YKpazXA11D>`0jn1UkUIKzl0^aT7z#KyYbms$^|qQA;7ivEhL$XWQZ zojA(jVR%Ztl*9k|D3B=x-T@ zYAid23nynN3Bq$vlVn=8T)7C>*dyHswu2d49b6>{*8~NFBTsW*cFTC-qjR0Oe0nm& zcIVfb<@%Rk4cvQ{^3w5CnaN2}@Y3_lKkliq>l~5Q>*X~TV22|aAYGcIjXzoQ5p0e; z?R>c^bM{2AHgf&x%ex$!#C06kPlO$G+rJro^#*%XuErC>vi8?#(fOx@WpJGey(0g^ z69MKr`+q>o|9}hs11A0lr27x3@E3d6&+4^EBBad#Woew^pe@yifKZ>;NdBy~7dx@zfaYcH!4E-dl8jprWDb^UWHN>Rb; z=SAFn%Pv}Y9JJv{?d@HE(U(M+%Hx~j`pLTUdMnv$OgaPU=x@HwXr8Z#Biz4FbT`p@ zhHZDQOPkV~l2u$q(DTaa_I#}`nb61X^jnLfQcsl3ZMkeL(Sjw;TFGf;vGJRNLfG`w zLRJa7yuOw1mkg^Fo!a4Y*yzP+hwjrIEuK;6IF{RkwX9#Hj@_kDiR*8R%Hd%Kr8yE) z7uI<3Dr3GrcQY}Uj|#B+BRjJ$%tZ#+M7b88$nf0fbaOk4@Ha7fN?*>og7-vKo|eaX zci7A&XzqpJo@L8L)Gsiitb91@Ti$kj>8toPWI4W%b7J-pO_PUAQXO`ttqyb1r!T^^ z7srkMpe=baV38QZ@f|MEkf4DErH**UL$8~YY{(Ru)D9DzP5_-8T@UDjJ{KIglI$oPA 
z;f~>Cyf{@@YcDgi)(gV?8ZPqTB)WDBV_8Z<=$)%Q6O+<7T&A&_^;sa`P?WX_x__oNf5`X$NTlDA-vHbw;B4Q zYgi+jg23F#!IB=HxAk(Q6Lx(Zo;M$F*NPhoWck8>K%z<3F>vXk*F(+A@5jx8k?CvE z>J??CjrEI*af`JV&N*E>DIQ({b=xQ9Mi0?i;s*Dt?MM(Y?=7a0Q(m-o&z0*uHfp{P z-A6+fdF?Ki14yiy$FL7pVQ47p?R8R3Iyx)P2G=v|PR{U8YpG3L`U~tfrWb?R6T=+7 zHg`$nA;cP(XQ~etQ-=dJ3f>UJy{tn&BDuElSDow`UgyVO=z0^T$wz_>n|;cr9glRk zB&=e~=lj=r&FlKKFLhT^+Ri)UeDb_n-X267QEWTf|8ty9Q{1_{c!^4q$kOS^<4eDEN#MW2Q<&uWuI2z%dP%G(>@4^Pc5|PX zAQacmd5CN=b-NJ&3;%q)Ed!tQB zxZ~MpFqdHy#fUd$_qcdUD);3B>KhaMtLg5=222}MM-khPn!LSURsI93mh!^elpAX_ zw=$;i_HQjaZrHlXBf{4YxyB2tXO~JU2>RQkbLu^lil97g!<7@(Ex15=)gGOhpofd5 z&+SMj*6?UC*Mf8>myNrotkJE`@`Jh4js&M!N=inHju~l6naDcNZ5`Qq`v_D2wrS$? z9KGtG>(#@Y${mx8PxQAHM?vG28cl}Nb60hp3n6UrJEN!_5j6>Aex%u(V!=)Z~;jHlFqk6#VpRaN!UUEbtNpoDstvNWC z^>ySYEN%5Js!F=wVy`vn_830!n&>h@m$owv4(T>ZG+fXRap7hh?8Kp`cZfeW}7Q+lfYwq4*NzEZqxmJV@`Eid(ND-PJE7|&G0jvJP?x)IvEP|JE%gB&5#+~tGGe|?dODqf6E$3e|*R#rB9R@Jy(OSfC& zHEMTtt&SC$nIh#@y0;6gOxL6dzc0rhY0GVnGCNJT=9#W*JbD+(&gzQA1}+0wTV;*k*!G;+2Hr~Rhmv1 zEblBhz*HP1 zeir2$FhBVM@GA zHQ=>Qz(}vA_dA^cRhL=&iyI;Uu&)8p^zKy z<=vlBH*)^yH(QGVwGpJ6m^b)ss=+L3ffDI&EGkCh@oR=3Ff1w;rSUtXL{TlOKxaIP z9o3?GQ3`*3m|OXCi_|fOm*AV5C`!~nkVQ#+G-djiscyv>(Z61V{?-3Ti}4L~J{jDg zZ(_r+KtVjrO zB-R?Y;(p$n_Mmx3VuaD_5&8M#J)Y!0jvM^X=b$6NxrW*6d!UNHuIv@cW!6gg88)3m zbDVaH`lrjC={uT&Ug%-4JHz)E3dO!OE4beoi8PqM^L>?)yp|-ws(4)X zOr*^zQ;?rYJB&Jq6R9mMlaYTH-cg)Pg=pbBb$FrzF9w#ZDiu)_ziWZIzUvq-Cf2VN zu|*|)MRn#~SibryP|A_uBAw%V%)e)Bk>^zfv%7)Ks3r^#J{MDJ?xD}|!42MZg@sMDJ^8@jB97Iw3BFsc{Aff%up(Y2& z!aqxCs030iXlO8gPgN@ReU0^WP8%#?jZ}d*57-_husUVinMZ6yQM7gDd%oD=QizvW zzpDDwnHzG{nfocfNKFMY5k<+@<*OSv5n{>ez95RK6E09M{vcWKJq;`T6)iTFY=$;7 z5${f>120jOw@+Zr5?IYa-UTNxPoQ*xHy1HXq7jT;Bef?A)E_LFh_H^hz)k_v3KK<9 z0kP4EP^sroLabl2Y|KP!S_SIQ#zLhEN~H?+lb?ofUI6tiJm~`4Rxm#TRF(`XGX(D} zz{ywt2kO#R3749B1IpN;XC}gQ1z%771ngbo%Vltg7>Y|2=rb;~#|2Aq8`b!TB89&% zZ#VNc0bUm@S2kF#t^f9}k2>@Hj#NQ2D_F;9u)vpKw5UCLV9Vjjl);24|Ctv;NFfSy)xZu}BxXHuksjf&n zvLa8s*spNe-L52_f!Ja4Y!{ctVDK^B_-41=uJ@Vz@mL4q48;h<_7WTcyAX$}O#~Ul zi+(huIOWdm3~IU?ZK$gW?-PHM7wb3SU32R-d^wlL`{I}XcedC1CqjNTM^_>XKZ>@J zw;DdM>Q6}tLYY@}eE-f_lbvA}RqZt!sE+Xuw>4!FCkH>@^&NQTmdp2g8GBSIYoowp zY0Ww%1>08Cnq_#)YewjpW9K5$uHjKhr{Y?z*kd7fIE9_OwZ`7NL$k<V<`Vo-DxO8@+e{{|gzb5Fh&)x1>=)!cqKQ^h$vAY;&ut13d zcF`(snNqkjz(Y41r5~=-rLW=5899F|5_7udvemaVETzVUcYhWw^`Bh6NcY;li&6Y` z#&>2sHJvWPHI{v$-~2}i;ZrW&W!XJej&K$^T0pCRRF{SinK^uSTZS*!QkUh+C`l(` zG85AhCZ{0cv@|~;D=C99Z@7+zZ`3;pme+~XS503o9|K6^XGpHb_K-~+3i1nHo=Wxo zvnO;v{){B#xr$0T!Y?thgyR0J#wq(S-|~~kRx&zeSS3Z7*nWCRaNODnGG&`G z5@lExZy(aWej)aMz-KtNqj(7=!J8Y|@N$+S3+cf$shelye$F zK_iL3uP+2RQ4#~CJva$8nC>6-fs_Uc&Q`G$e>ddW+ZYPsRwOEy0t@5vNt{phKt%!o zUXzgbTZB6WBiuvcPFUShvcY7qhDn^iiYV@*X8UN~Wk^Cd805cDUGgm*rkEMOqfmxn z(@9h9lNytf-AfGQ@=6Y%k%BLcQJMVFh_a=5>E6srm|UL;CJs}Ld+L#P)(-fe*=7+|B>t`xwl!R z`SwXKQVJ;CpWO}P;|hK8w_--8l0b7tG`$dcSLYW5?MncJrmy?)LvxgCL_ zH>+g)(cO>B{OKEn>>%+6~Z*(Um zYS*)7Drbf4Swux$sf#j3V)?C)zB0rovAXhC#8#9lH_6A#`PHuh)=$YZ@Swee-gd9b z;Gmu7Qb)M*IJdk~UP%&{-nj2IF$^5X z2v~{H9?MOua-L|`SmAETcvRUtg65!b&U87Ff-_x=duLmFNQZD?#U(sNeA#c9%Khfy zp{2uXLC$8Qbm0Vk#_5IIUatYO7(!di=rR@-xa5Tou(&~`Gu*EF!Wiwvuha7p{(WQ1 ziProF@(^lggeDXkSmhrCThz#N^Y>zf$(SndeQ{{<=ypr+wq4!lVG5z9`k$Rvc*M$eEYDsisEdon^YI z11yl;A1>B$)0SuFks4TGR&QRuJMJmCpqf)Mchc5Z&hH-?V6sYH7MK=sf;N}u_KG|f zN{6*RL-FMrO0g?m$-g|a>Xgl6;#~J5{)Hf?v~w$)sRLh2WzBd+fow^c@RVho9d~3- z&Z4PNL-W^uRe$%m`F3L~rh<7r&)yp9>=y$Ybw-_C>)_-s(srGM;+v)OWzExCz3a~q zZ6njAPRCjUiy@3E{*Y1=-GMU8>L^FZb5e-XaGPEZw~mm1{mi{)b^NsbWAThtkZ9YH z7SEMamWbg+@pIqXg(G`WH2R};!4hb0b=E;g|H-s(gjwnBL009pV4#9pq@RZJU1P;o zktSo-=i_G{`L@AR) 
zc?#3MUF29p1DyZh;CY(e!I9w+nH8QLS@h9sbtREK>X}f~uf4q(MB(Ajxv;S5J`uf;d&0sNBcZ8Jid2%o zh^!omnm%2{Fqy2-%`#?)tY$Y)5Uf<6DxnRC#UR0zV$F_J@zw!TyTm}b`!#^*0Z8|M z5M2Oi0Fc`MAWn(w?@`;Qiy8j1*U836=!4u(FycyOWJjudYloR!qNv=>recU}V>eA8 zq5LgX!Z5P|brwKs|ARCFNJDrm!6y>OeyX%C8~ikDHYCBtgJ{Xx85?9$k|3I|F6Vxr zL(Q=8>@OIb=EjFmjV+Q1W&ZH8v+a+YoQhGIn~YJJpNi?wkmhO3h_gPvChz~nIm69C zRayXER+mq<(%s+et<$oL9AbSe*|IAIoE&MOW{(7qr{FX3@=y6>ZAf48v?V5E0#xLA zJZrvKAHM{7Nl6}0WRS1TC-eITWW6cI7U#jdaK~C7Y7qYolnDUOyZf|{@~vh06GfiJ zCt&<_G6o99*l*ujs`dW;hDErD;G;Yaf}-6ZYH)A5)KOqTa_F!wJrr1%JUT3B84acu z@rQ|+s}|8_@lP@Q*|GQ-5=^bG=aG=I79q#C7$gmf2|)c0KcJZl(!^_a%J8rdvdiqaPzID2@?==*K}-I5&7G z>Ky6jh>1gU#F%3(;(+U+7#Zs(uOJLQ;9!n00TSfoi$Sen*2mm9$6fU(H(jzQFz4f7 z_@1pfA_Wu#`S{i{=u`LO1}Jbr*c@>Q#(4YYh%Y!d)U!R05sT&s2oTqLTZ@nfYCS-$ z5%*?85(Q?q9Sp|1RYTGj)-pq{C1FdX99{m2BQ!N@xpPG{zWgnSe4zY~tJC=Ous|4XC?!@cHv9!)Y? z8>9r|iIqqLgllHC25@Lx_o+HwHUGBZ$2r z&4Cl94Zj&-=U>Q{k6B&&mDRfaexjv<)Ej!{`2%`AOWqsRH4}ag<0JuHpB!BwZk;iH zeJOi}n;&Z)n;q)mwU@l2YYQzX+3a_2vDKXEzJcR1mMbybt)gevmVR>70R!87{rovw z*EuK5Y`dHmoP^i^kVYgpFZ*)h;((UHu-v|1@Pw zXJE(nX-H?ARd{m8rA&BI3TK{fqWy|&AFuh=!fx`EmAtK3@?77_u$5@CZU=p#q{xB1 z1YPg#q~EG@{_Vevko7H1l#T%dG~f8GXIE6zE(N#gJ5p!dzrxooL$NysGN<*MTOTk< zeB*ha9f;P5Tj$?4|B^WSQ2}yc99LBQt&@Jnr&4Dw9YE;~P+Iy=3FnFmMzJqy-8SiW z)iUY#@c<|qqryL%JOvcT=YNej1ugP#(?JEKT$6tO%jO}SeH0m<1J*M!s+4|*Ik+8? zR(E_%E?ZmLaee|hX={>J0>cOQv&Ka$rKi3Z#gTHi3pyPG!C5$G*(2Av)hnbO{gm^5 z5oVTG2W^UHZiUH-1Wi(Eca69yGZT~>h8#r@VSe^i$(>7Xo9+f)Z9Zo&v{Eto9oOI@ z1_&RTTHbfONMqY)DqRL@)&7vg)I~Y9m7vN*Dwo*Y&YI{%jhf`#AI*{6{tNu~Uj=i@ zk8@m|Mx5U5IJJGt(99TK^&8)BrvBb8dK6RLnLE7f&QxjMm6%hjJ~56MO;_JC4Uihw zwA`FBx?b5EdviK?RH&9IGT!VaOBis`9oU~dP_e%UQ8lx@jlov;dU3i1-sWjmab;ji zk@CCQ<>t06XGdo-#9lpOh8JUCI8}(#+j|6j%eQ>FFg$+v$5u)ABI%p%Z1D3i-i3i; z@$U&w_HOVwF{vypp7>?V#`<9|n#CB>7OeMaf73Y9mR}$6KY@Q4@fm-0aFHDzMS$tq z{co4t+`kmJpMUDf7DZ9m`P#?e^XsXef6Jui{^vrZ8@y@{HpS-+*Y5on!EC@wq3faOhPjdhVSglkhS-KqU!e0!azG^rOv$&R(_qFr6K!+tVAKjAE@p^1$XYnex6}-~ z@DUPtLa@5P12_Et%?#X-1Jm4TW)e~|{bctB?H;QAs(Y16zAIJfY5EE!2Uim_`lw?K z((b$ox?7fQa7(#R`pOtu`pS+_x)BA+=t(W^t|ty)8BR8_hX=B!ka9H%?JsDSOK!Yy z4&o3rcM>)>cZvtz0r)0ibEk=x?o~7abEiriuBJ#JCIv*SDMwFCuN=x{LR!|MfXHjE zCJrDngzV<Yf7QNBr?^N} zru<(^^}m+-e=W`bTH16Yag@>Os8g&tnes^2D$R(l!F>l>ghLvy>)-EH(e6)X&#r^E zHa90S5HUD+;wj0GXH7P@L9t<+25QHncp@zaSRU(9ehqidPS;}aiAs@)@5*!GO=aG> z)gH0;#?GvNS0hayWve%C*p>;7g1G%V_kMY>&!wrpyKa9N?>pQ?@yXSrVpdOUpBviB zo7OYAAJ)p%Ydd_j!c)0v_gs7JamPG%>3bcFh&??KWgRx#G4olw4N2L6Z<^`cZ*^B( z>s2_nIi0t|CzylA;~_rl++CU|6kXWu!dx0A@ZZw8x9yj+yiEtW(29ai@lxm>iq+Rl z3h~zou#pc4c|BhgYRCBp>WwC8X>sqMPg~6@H+#7gp=MVm7S67mIDurhpL6yyiC2vS zk~)^CNLG`AA{wv5*_;E%*_Rv1SWNxaah7jlP|xCq7VJ0NYYx^@ykp^OJG`N7bV^HY zcvRS@EU?%P8+hvPbrXk^0A;sObQ`fUxPaA{TlL+5Mr{$?5 zp3p9B#|lc;4PNhKTN2p6)4|`7?Tj}=s}((=#5%`X6V%UIcZEF54=BRYVm8dpDumak zm+=hA&d{62?=zd){W5eVGq!Nt#^!soG1sGRysM{o<-<}?;Lz8s^ zj}Ecxe%aZ^ts?mPu9eynCR7ZQ@p#z@+-^~--GQAgRd@1*Ri`Z)X6aalI@;9CJ9*cs zO@3=pci?wyXbOn;gW{HVbWmokRd^hd?wz>rV+wQE*>JgR(YLwnq?_+qp`38QD$(1- zJR;^DwHo++rr8Z|7b<=Ho;6E?J1t8q_192Ch>Oqq`kyrWfcwsa6SoH=c7{_H=i#Q3 zk7hZHC2;uAajxuNs&qm!ZU&yw{k4;5%k-G(pE3)bboCe+^YqjvA(AC9wrJ1*%QYbej7 z@xX){#V+rD?>FPzq>qyI{FpPVulJ3?uqLirs3SbRwBF;K0e4wWa}s!rvb-CzZf|_H zpu!iKzDz2okhK=6b5*KErK~r%@{l9Ev|fay?DcFpFS*_sXQ*+BIL4>z(t&VQ7ULf) z8o!+6iQ6c3jy;1K7GNikYhSW`fj5hoVGP|UVIA@)VQa5rQhU^VJ5YnCj~MWiO*xTh z8J*bg&dBh?6P`0G(VNRaygH3}BnlUVb!y0%OBR&PSvpLS-(C3%5AmXuUHM{)fY_7Q zAf|C}O^~1jN$b`Ia^HcRdJ2%Myz<60W+3P;T!8&B=9yHAPwUowg;WEG?ts|M3W#Qa z_yoI%3IBzhngkmNSb_kJGQ{gyOebkNK>cP*d}7GANuZ!ipa11mtu*U0$~vAjH?q@J zusF8VbA1cx=lbNIU;X`zBWtdUAJt7y5!KyHLli9hmidW+Df5#iTlFWJ?dngQCi9=r 
zdlx)0u9QUje(-dws~>F(@*H)GGu)Ju80pHpOA?Y6g+z)lq@Zwe3*G-CR#p%p+=tyC zl=p?|6ob>ZxsUhi)3=WOKb|v_ePb9>&uuFtpYMa2Cx3l;g--_j&o8gkc`;=3qhhHz zh=~vW2=Nhx#7Z(FQ+&MoLkIKKEY0mZixDzCRiVD!#0adU`JP{NE~ALK+O&-yUF<)Zp@hdz)CYPda=!>i#!)kNv+v zt2(li>{)6QXL2ypWLwFrXAI^9zD?RO1*p#&3Q)E2q%fPM!wB$C!U;Y_qcP-Xk|=fK zkn_zx9)mzN8UxJ)Hp6etE2nbrq6io}ApR^sO_l3XS163Mi0~FFDvbR2{hj2+yNU^~rW#6e1}-0hcCt>d2=5k)Ep~gL@t6!34qn zJF-ucpKP;oa^$1ZnE50kF+8~&j>cg|s@@iA*cXVGl?N9tjcbTdnaRM>-zz+FsRD?m z^!@Y%#7dqw0#*_`*82Sx(eyxFxH&NuRG2yN-#Hf#p(g3jz zOpsO8^A;kqxPEEFm?FthZjjb4ZPBq+42P6u)q}E#9KG*65Kqa5|$Z`k=ql zt!5=xA&%bf9j9%#DZX4^vbT>;F=uJv`Fj!D3O;bFbD8~bK>PtJ?mw($)7$juP=$-+ zHL8n%Y7O`u>aK1(6L?P$3VsZGy`I|@wv4($-L6B_*cRz#9V1}#c#b%z7onXqu)jE%+y)3y3RVXHrZARY=-PGQ?@@(u0k=< z^GQ?$o^*WbMcV&Gh%3RFFXQ{%>O_sRSjLw-9Ay&w^~+#WZG+2cY*q(TUl*$}WzO0v zp99(cOt1C_(XPchxe^lN`R`YS%41#Xoc;xhcKgX+lfEF@5tYq<#Y=svd~qKKzU+1> zVLhs`MRRx%zbU@`a#}`TY*Cj zk7%399e(VmE39S=*|FcQTU`Bam6X>Q@r6tsCd2A(c=U) zGmrS8_A&9@G>_9ND$xgnthDBOLb-~iO7TX<+5H+-Z@VVHO%0t6cumGUrB7Prw=Xe` zr~X^DLM=zqrz~##AN2l4_!`Fv?ofOw_OyX-%xf(%Q^$qk^jSVfG z*;^rj`8)#D>ws#C>wqv3aVG3TKLg@xxGMHFoECUxpgA6YA7{BIn5e$twv$Py^ok+H z)-YM0fTcxdq!9I+f+C~g3x-S?B|cljl${N?X^W0p<6piOu@9n5`2hGiG&cEbp`FH? zs8U%>mdqCnB_Nniz#=F+Q;3?Pp!jWv}ea6Up9tB$4eP+u=CZ_tNwA{(XLboX7os zJ?{Iu9@ov~oa^y;e0zW+IhSc;1Ep^y_>21>#gpXcI)8Dn^3o0IVHT&ihdg+Jty|8K z2>hT{@-q_LDs!eS1+~bn^^>q-1Xrx}Hx`qcFC>k*yY!}?S1C-v9+GemRIYUj1d#9- z^SbmtW7WTo7cG6B-dZ2-LJlD3FHT%YlDw2OMhLgbMW)*{GL4fc4pe?owQ%y!asOn< zCoeg?#yn1Z=;<7~eiTt%6VpA2-Co6=GciUUW^xCg)Jc!wPv?jhC zqg-(i5?tRGNZxB#bg7$ux8(K4$fp^1&L*3&T_e-c=?BvJv);MdU^hW=H%(WMh_j88 ztJ-Jo#kLQ7h4!Xop?glZ7-N#>b)y;FRkm@-kht}mCvGk>3jU|t)VV7nEL_*tLve z?(;XTWMlIp)-`DM3cp%n&mY>z;ZjXr6|wR<*Ik)8SHvpbx+;6!9(iQ>jNZB8=6a|Q z48z^*X-a7Ca76H&zU+zQjPdh~7cuQNkKMlC8d8y(5x35GQQxyWN`o69C808mBwHm# zg@BFVXa!EeQz4OKLwIn*DJgse_0@l!nQ^mrC3zw}Rr3pe-?{>fw?1KVmJ0)88ca6o zhw2yC;+nZQ+h12R#Oy2~UiVZutgjo_tC9E5wcB1qa=6wnFZ<0wIJfw1-G6Iqki6O} zI*9%fgI9+B@1v%=UsT|gnEJ_g>PGiFbi{^c`y_Q{2L$H7ist(y-UGz^UG?c z1VP64etTcFR9}(7??Xk?T=m>eCmbg}zwCH;ZkeJr+KSz4^=Jt@USZ|d6nQ9CAd#rXM zue|9xh-};X4NCWrt1Nws(Ta3&?*(nW+Xf7zj*w;I#|JMCa(@EE7>EhCz)`OOzIttR7_>XOPMYu8wbk5BEK9T)U>vh4sK}(Tnk$O~_0gyNhEOsc%vpZeeD{ zIO4(WAkerfa)Rw&es}NXZuO>vIoGTCQ26e%hlg2j%JZ4*BvQ*Jum!&##ie|$SwPs8 z<9on;>2WDF8?R~Zh`!kR@N?8WcOwO#RpsTxuY?Ji=T;jtS5&UP9QmbL65gUZ<>etv zENZ=lLRM83j4j=l_;zs5I7^vUSU zB;Kc)(n*ZIVd>=daar(-mvxqjb}zqLdf2_Jw$!zIS;>9)j4&dli1VIrdoS4-+PJrG z6s_MIJ&M-pRUJh?=_MOQtM>MdpcQ+gN6@mpsw3#fy<{V3@!q~+v`BCCFj}xzbr{Xt zOE!$=?(G{wbM!_Jq3`sn4xw3k$%fDjy?ujd+TQ3v^z~lVK{RDA*&v#%w{HMV+#5ZB zCg@ciK%eg=8$jcF`ufqwJ<gC5m>^iB_1KYFvLuMfS}6Wxbi>QU`O&-ak^p?~%C z^`fVGqI=O}J*vIvp&qhcbYD+j54x)-x(D6XquPUR=^^VuH}&*&qw9L2yV2D>s@>@F z9+I!Y91YfQ)jl7K#38Z_Ny!|LF zl4&AuwY)Xi-{YdBiG7UMQGL76qC}~ut8@*yPpBJr1bGje*Q0#X3>#m$G&%Q?q{l(1SqJiHU$5yet~QgInwq6LBkT8jKHH(gsyt ztoT5xp?cjg(2b5}W1;P!^$IT1la2;+i}Kjn{6f6^-MbN7gmOoGv@Yh-3mthKv9YnN zS*&uh^3jp+=xEr5dG%e)TcMycHa3nmi%m{eJvuUzjz-}&WfHVl?8xKDyv|%0svlUw zVtb_}JWuBCWsP-*TFc6WoQurda1Z=6eZqgpZ$vUA%Gr*{$~TZ$s^FYg7(yF8HDgN*eVb2Nmv@&4H0FPGwlMuhICYcda-td2T zd)zSlENTJ_R@Fiw{l?IBw1py&YuvCAde$$g4S#;gl=&X5Dziy#ro!ifVCK-==YtZX zq*4^a3rU4^!A!xO0=l9wop3go&c5_WcGvQ;*ZZdX$1nX49c?V-uv?j)s8g$BM|-zj z>qOClli%p(?|f3&EtXjjnNef;nVh@&gz+YPxz4dkWY;006+}dVL@w+?%*kcOZX&zG z2BX8Jb_2wk$h794?yoYcsdjICrUEj52Iz)}-Nm&rT;+cHIG!C3shZ+hC5}xtM7dn3 z@GibX=)2GDN#U9sZN){tvd-A~uA=sV)n&Q953{`dze!r2U%PDodLAiyKaGz#`JUS| z@rO@WzIi*954^$rHb>6k?*bR=<6Qa|jK>et_L0jP`7{A?q|s=V%X3$#&4~97$H2Dj zPvKqQoPp_|Hw0-u-wvsQV(ikvxOCrX&1SLby;t*gr)XCIP`fx3hu#Zs`Ws%p%cqBS z8y&Af?>hG2HQ2HrN9vj0?86@J1@P(K3?Wn*cm9R6B3wQddg=5Fp=~R?1qfkFcgXn{ 
zz@uM2CERx*m%VWDSqVSfLi}ic4T#A4;O`f|8^972 zqx2JZaW#z4G0=e?P+5N(z(sf-ISn;;>5~r;qmYrJ{~LnBOrD@PAHT=^VR`-$7@K=9 zX3dJrPH1lcgzQ(%t%~*EuNg|g!YZ0`$hE%m+vEOXh1frP!kmMyC;FiH&mLIW#w0hd zbVfCPXAx?})zoNStvU z%h$eamV#V5!YYb*+UCaew~K(DZH(SBUyC2=c}l-LJ}%fD_wmo}DcRBV>YrUk=WnyB zjA?0$ey9|2al{Wiq^OU#uRlKS%SeLC&E(f_%f# z_O>yX-Tu}Lw=bhM{<%Rm{b!&5+Zl+6Bl9%lX9J@X2vL}NdQMQ|cS8BfMXVUa5yUVi z$2wnuON@P_X%_q;Au8f>3ebl4w+#iQtfim|HfoVowiwS}5I zTE$;uHeZvKXU?2v)y)0(fq{}ndF1H*@=wzFZ%4}d88P{bs$isI{L$0uRMrrxVMpdz zU#gz#Rh>VMb6ow499L>wPPa`sBb%=59}Y3!1sHU0<%6tnF7Sta`z#IeeGtwE`csdm zmpOQWft=N}-MVpN)9*0%sbL$@J*!RJFXM;UYp0W6m#daNkqcrbKEc&@mn^I_nkx<> zbj21LJ|Jd%SKn?nB*#7H`8Cn~+QL}ZtFv4$WTRlRAjzR%GBar-e=;e_A%F5+(ng-{ z*i=ykseplcsV5MKD$bpJnZ%Vl`68(}XVNl>D`(O;sW^L5Cy6V2QZ=dgvun)3uy_G_ z5Yc6qkupq5C|EOlQR0iLm~5K1{IpVvZzDJ@RUyDX{kY;b|FmSq zHU4Rdii`Zy2;_8Wes6XEtbne4wU=d~RrPDj2&-xjOFOG-cS|L!YFA56t7>P<%U0En zmg|<)4wl`P)pnMJmen?v5th|fmUfoa7M4nuWoDL~OSKot4DMH)@*40|Z15Uz124wv zmU+1pb;g6O@sLx|i`50Wym9N8C z7BHy&8Dj;R$TP;~EREn*vuZ;=%LwS98hUtn#?!kMU7}uzuBeY7F-SdoX1hq>EG`5DbQ8#eJ1nQOOxEN{jnx_ zMt;b^$oJW2Ys{6egi$}bk;2Lv9f0+6$hq(U&@L_a1ABsXY@ii^kNw>PA=kypKot(i zovzU`F5bTW5r5+0=G%4G{$H1Q$(`x|*h&^^-M}FSI9;D(J;LNGv8S7a%vw>P^hCq$ z@4DE{U-`MSFMz`LPqWeK1YxMjbIUb%^O}EAj#7Zb;KMDhd?}^R1C*;sQvAn%(ry=m zVP*%du@B9x5yfCQxukCuv=(ISn-Dz(8z;q>{#-<#%OlsrD^|~QN!u!yC72PYYwDRcARFH(1c=8_Ff1Da|?A zvxu9bDZO1il1QFs)xdD21T|O}S(a)4rm1|)qpi!Rx~V|fX1w4~cinO0=c1>VLQ`DD zo$>v?9zUwDHgW4lNIReXNBeRodV2({yqe|E&mZVvLFW_8W* ztcmQ;F6*FBT|u>@X&%_6pKCFisiWi;-0_&vG|!)QSZ}Fd-_Mi9ZG-Du&c69mKOX9c z&tM9BsswvXEcvH~J8hErrmf4JLm7k}8wTB;$~8_d;DSX8-!LSkZiF)!7{|bi#nfwK z@DGcA-Fy~e@KfStkJ}?t8@JFLPE`+avBU4iD2tnXCv6%R0tYQerBtUsV%kX6)}qhK2NdMK;J}etDqTFa!VbeD28-3tm5m&*G$i0iWmvIk={TUR#UKFI5Z$dM=<~C-U zy~435=Hv{Jd>^~NhNTW%dHS2rS5YnW{?$hrz1(|&OKU4c#V#3al_ZT!E!um5mZB>} zD$W^f{J$jCtiN1-lz}7jd`xe+LR954Kt*ew&bBZ-NM&o4&NlKyPAxC>O{)4=Lq3Kq zHZ4D6iaB1-$JNnc$wSbu&iV?`2J{=hmChEw{#flU(|F|c5YjC5|S36eUuSHw-=ah;M6Fqwi5UzbwCQ% zk0J1UTo;h4{!Kz{MG^+jg~5594tzeEef>nNG943ho&UDef!Lk~%U;?`Gu{{d zbnaKw&Vfb@_&VVVZ!`S*7N_}m32VBF-3 zY31QarZ~g6Sz}}NX2hW(FLR&Q>WcB3p#U#}?!8FTcn4~#d(`e0=|2N%E^^+L!@qVN zxkc*QM66XQM9AqNKA%sgcde-0r6PCj@@wP8ZSB-<%ZAlP?|H-{U30O;;xS}+uIF;+ zFHQrJ1H)1E-LX6`tKVzH8;t@yG*|N`zn0W5#r%>erhkC@OFH;ws<7~6y+NPLsn?&? z*W+d8@>Kq(Q+G{T&oxr;w2`|h6K>8GtCHM5O%0WGEVz=(ae+4YQMO>u>`m)RtMaLb zRz~TaZEV=s-S-!gxIZm83S#E`MC{TL9H-aR_Vi*9cLw)b4@%Z!B5wQ%i(%hzG%kr@ zW5((W%+%Jk& z?>`Y0^x_=I|3iYzg*zB1S<^JBgh~$GO1#siUQhC>C7ymCEm8BizEZtvjNJL0{bsXt zc5UmyGo5duxe+L}z6lPcK8?Qe@ zYtLS#zZbJUiZ`tK>P6?%95UtJLy{yRgVA>}7YWo%))*;kZ{O$ld5@@aQDx$CPDVJO zuF1!&EUTG#=rUy)UPiXfk9_;PWs8yP`M5?yqpa@O&9FwZSekE{QH2dB{=RxvZ) z-O(~#oxkS$m-3$Gt`U|~RUcK>qTymCcRu~FO%@T* z=Fv7_FzI!gO-XMOmGtq@x2(x+-yp(Gr&N6^8d(+N*f&tPu5;AM7;{y;=>Q%6^7~rc zy6`XI)0{VXhV%h4GF&{^rn^^|t0NB{+h+t`Ez7Psdg*K$KJmDds4QFeoatq6)I$v! z`wa7Iu|bsAO}iZ=L%HPeDGZtpAKPO%&XrlXQkZsMA#Z)J4F#VbQ5ftrP#EaEW~y#j z4Vu_dgMwUmBIgxTwK9xAgMKrS`I@=<>% zzrmn{&>3$i_hCMT(fijhh%R%T_jSJ&7al18%~bc37|KOLxs9+CMs4tmeFo`7UzgtB zgoD{K=Q1Pygsi+{LBupuWGeX+W_FYR{c-16;7Yl1(27e7{-DK2;=$XqkX929F6y1j z?0$lfPUON1<*tCvT+Ty>80I=X*gl2rO=1Z*n>px@W1OUf!YHS!-|TJB3a?fPjXiUn z+m3{rF#KVlfx>8KF$wQRrjkSnO(=7neeAN8MDWTxn8%I}%x4i&XBfx`c3A{Ih0&8! 
z3L{GbSj7sDm@itWUAjyAE?~ns*BVbXAH6F3c0%GwE#6Z7`!nkEOO|!eY$j)@jQ?fU z0JO`TcHzJPZuVU@4#ltzj?RsNjZw3kl*Rtnq#os7pUJCZSYKdwpWR=(O~oRX7`?f| z^XRKox^h1Ou6$#SuuCT4Fwx6NJ>9>&-Q@Z9c>{g2 zz}oaks4P!CD{=J6&EQ81FFv3~XRTv4UnK8FqdMCNi#lFk#;PPnl_#k*O_%oV=xeOL zr+p+i6|T^r6t!X08y+FA{U-}qos?Lwp>he;lJa+E-!|e8qSnBdBB%QJSjoc$!e1Y@z5FZYrq51v|r4-pJbei)3-)-m*b~vs$-{6&+1QdtT^+9mQl+L_c#1JmHH|+-{ecW4>YX4;Ch{g z{JUpW#2lM;9Fj9@maTJeGE6yKTzMLEG|Vbuu)1E3rCFpkpo!4Len%DBM}6Sap{KE5 zXyU3bg#b~Svmdf_1m`K>< zgpI+cwa(|)G){DMVz;V5FU>_Xu}F=!8Oq+=vxmdR}JF1|IZvEb~E zTPx@cKA*kT@yFBf@sviDwcPE`DK%f*D*sFkrZKuL+74A6A{FpUfIIB~HxWORxOzN6 zrLH7CCD(vo2E%~g%{Njp&xM;YM6`a37na$rB|^?wOW-4{&%LKgu1Uw!d*O=nC-C0D zBRy&*|EI$DPc)v6$M{+T|9W9M9*>sqe%I4m~4w zn|IRglkh(ieGdXH(($&QW7Iwn(EcqVro9h0qWmpkdCCu&tqRW!_?_m1M7f7u?`xDrEz#e!aiT}$rfPhY25bZe)wjfZtUZ%HlHrGnmRi z2Py4}_R(|ZC1AlO{gAZ&CJHUxr1DN5J%T`mqDQ4NxTB(72 ziED35t^h)U;^GUyAQ=nL!pkAVjJlRMVBRWJ!iGReb^EYBJz1zP>8hhH;YIk=1?)_+ zmMQ)%Iqo`spv!SHI9~IB+xgSUb$87J#F(tGlZobH2+{c6zT$9~0bNDRgKfD*^=Aii)nrCCGwkjS`osQI;Az^4LjjnOWKJge5E4sW@AT| zVXyaPyNfWtzFQX8{L*PL@s-Ix9r4cY!_RT){hS}}wQyr`3QsDbZsz#zzP`tk5G;)Wve>J3#=ya+VvbrjDQfIJ-%ZrZ5&QDLUVJ-)r9C;r;)0(n8Z8R) zBYN`;1UU!Ij5STCik_Hy^=i!90?n|P)b!T0E?V$=4RmxS4)Udyf?cSR`HsQr2n$LJ}sirYC~(VnLfe)y)oH}!N=$SX>j&r68W z=3w=`^+97n{l}rt86RDw1$;&wwY*J?UmSo{&&8Z^@=rp@D>)=zxHVtAc1YuO>SDGS z$V%gwO6Q4h_Sk$7^7C&n@7{a0o)J3+-Jz)lQ!-A}PvugFq}BT6q?%MZGs^{4{t(9v zvuHiH3gMm8!7hixst0n9#tk|f_GvXyLaH<~6;-^J6WU!9c_NBBxhC<>iM8=)%h6tY zs}SCdt}CvOouYWRzgh0S1g>55X(haDkD8_$b`y|EactcDxnle>ST`Q7Q%)pz55o_{ zQHYMyXdr!^Qo^0+f-luUrAK`qx4NIDD37?GPst^}cR?vdZmLHtMbJtU^MJ7AW@qfM zJWKYw$TCe4=Y{zB4x1mbno5>hcN@)RgPugP=6+O1Dhklx=eor`a8eY8OywE#2r_$T zj3f>IVZytP{JUnqARN2`f^PZQW+=;~Qe2$TnA?-ZT>>_m! zMT3mZ_&Q2$s~{MWlw^Y6u{3_X28oL#B@Qqe909Y6JWlD#pgTr>Q#MJ_)z zRN#gRMt4QC8=!(0R5*S-%6@vA@`9834T>&Rb*A`~-x7C4CH>W3`d`dIUm$%vNyHFX zP9=S5ic0yC1zR3ea$=58VUoHl+8hj($kY#rLN72s9wm~!MH%l`Zq+RyMSQnjt%OEj zV$wj-JQUA@Jh4L^zw6gyoM)()WT0{MEy`O&7HoQy zU8)1OU=9i}hj{3o9p<17b0~p1m_vn$Glkn0!M~MO2qZs1`v+kcETDbgEy~Yl_Eb=O z2imJZ`($V@4DH>a{U)?uII}-fV1o*3P$3=KOGc9fL3^^>l($^W^{zrGd~U*T4JiE# z%@v?|0yO6ig5imw`I!PcRM3P9dC*+rsWwGCu{PVo1o^idS}D)8StJXgoi4O1gLanC zZUowWz1F4rmNP!(+4w$n>&*RQ9rLT;cHGuO=2?z*%Lf?>OJLzoTeDOJZ@Y5C3U;gJ z2)S3gjBIZUJU~(}AEQQ|wv68nic#72 z*V97;hu|GI09sENpU!V04Oei$<`A%;dVm5beQgh$PY*$kGi!T7(J>Qyh!`ww0di~Z>NMujNea?Qiq`50LaKt=i;b5qH$Spx17=!oa>qvBBuKqyJHei z1;QsETl6*U;l>wAzi?)drKpDs!OkkV;c{OK+huV};L={-=>vJy8X6f`o?xu55|ke! 
zZi|3++vKXXiyeTkzZFwj9Yri~W^VBH>>K)3Fa#IWN-j9H(OSFd`ST0lqt!glF`}C5 z@u~?4CQW^42jPa(4C@0Q39KAi{^ulZK#(~e{CEp`mGp5TeaL%*nEpRGv9`OH-vX=D zuwB&a>7{=;#PHZ)7;kogcgcX~X>!#zH52F;qgqCKPamn{sHJf236?$XNG6yp$3`~zv6Q(x;cm^>*=52-G z(tqh=V{C3tbv&iRFdF^;#8JIZy_lCk-&t_C>475?=LMt|Q8&m_2Po-fAQ@6|@VE!0 z{MUgQ+bGix&@tZ&zES{?)`b+#{V{q3olo{(9ZNEu?w~k#RSGAUScxTHJYzZj3;VPPq|6Cpbq9-38 z0<&q^U|K)evCg--fYk1pbH_cGu{NXs74Gc7Dupir<=bcSVJ<&h^Na)`^ph9r&e;uc0eUoPDpMB=% zMPwc@$=pSjcRu~U>jLXID9Gz}PX56Hayu1VLvCVBGH@^lI(KHQ3GCiQWc<{b zlHUJ1_`m+8f`fbc0q|IYHNM92@SoFzgJ;z8zrjP@{r3Mxhkp@ieeTPR9YhW+;a>`1 zpUG+?=mQWv8~O+FAOr6k9G(fv_uyZ2ARzev#AW})2qAFta>Z2x*~WN?43Ca@4oV|V zBZ77q4^e6%lrMqVh@QX#*k>C9XN}Rkw_q!6XKrJLt$DgkZwqO!j1^u-XIi(jq^V_5A`mGxh;WBzQ>AI_cz=#b~3H+%o<8yED-%;ul^5O%hmp`UE7 zaq<4w!ZtM{XxU5JY3r0`bQNmNAIltOXO#*mkw<^&+9ToXt}c9*pn3RlkpcY-zTr;b zY%2@>(ot{t0cGLPlDp#Kx9>U$2}l>MO#S6=~bGYg--#a~4l+;Y`P#dXCgtHjn3s)|Z`3 zQCk2YN+?#|Rr`(S0qy?oOa{NiT(pS+nw81or-?9BgvR=V=qg>zXs|czJ zvM%?QCvgw=SjM$KT`M}U)H-I}Tqzs>_#o`*=EN_H-mjroJ-qi`RJ^Xe;PLns&F#7R zq|NK?C{6t)WXsIjI(fnvj#lTBHV?al6+^{~S38nF;%a}`3T<4h@QI--lv+c;tugls zHQT3qR6_*os93jjDTTN_JW9l#tTxz})OX`H6~}YmKb;XI6^di)t*%4;>P0_z*07i| zJc6FALydA8Jn#ysDL>vj9YnikRwqy%)_&Y^P^u~Ill)O5_;UvJEX!rM<@{kUraJ*| z*#3}9*frrze&I~Ol8RR4g16n@y~(Aw9ex7*J`ErBF*clBWvGwU$fN{@!7gPJp`Nb0 zy3;*+$>yZ9?p<^b*m@uF-YEI`K4pcW>a#?rvdzkv)l}EYQTXPAkA(?kqlfhsp7*rM zRkgpAQ=PYbi8Ho)9F^NkB)nb}CYSrMrr*GHCRV5?nIH3fz3-aB(!v|FV)hsT-Z1L; zx`m-9C6xOi6Q{@hee?}(^UvZlW1%En-+BM!qBOY!a1-w)9RZx_edmbN1Cwyg*`m^~$2T^Bi z+&fmb9BD<>=;U-tb_HRVOqT`o#SWtG*tqwsY}|6H#?i^&=!C=V)GKdxbvK#?* zG|7EB8uN#|1JDEtzQ;<2u;P=;5imxRNYK#)!XVD(_vbtOT4N={Sn;Xk2-u$9qLfg$ z^>)5P;&$w}a8~^H(&xFNNet;|8XofcIh#}b>5yoPZC1X{e8HNIX5=C77iV*)I2M_6 zY_Ss2toSL?=S8DQUQvz*cMIS%$`K%rO&HHlcd99gn8dWi^UGVu2{(gR(wu50^MkmFR&J2-T_6i zAa9Fr3G(UPv2S!)-tfo~Jc}l2p`!^E=Dp!+F0s^+es8G926js}av2Zhs5+|2T->u9TF3JsOCsRigBzFKkpdUehNnj4soIXPy6%g{Q{=m?sbQB-*Sv1~ zytJE{AuoxYBI6pU`gBC}`IxHC>!k4_yKinE`A8oR_=?_2AY=Eb|2gM&{__s?qAH;b z`?E<_mu2^W>~Bxmc~SSZh!sC(UGB5y4r+}Y4s5e>@@M_=55Kd*i_sdJn5lvinY|F|7d+k=g8dD%Ms_|UtW-(iyY_3@mvOXuU*-y>x?Vz*8p9rY%l}I)7L_ZFo0Yo*0Fn{|2wW7vR&OlmABqKP3mt%bJQi$q!A~YD*pk{J zUNEC3x?qlDl$Xb$vZZmTi`^&k@=M@N`~rwkSOA??tdgCebTk&2u)_~?9GGp31xUR+ z0OijL9HBGh2UY;yMq^PytRSt36)=8}1t=3a+-w>vK&C}wk(DqTCD>MD0kg(;fC;1~ zx-dIL7#7%k!~)|mEYgl10|d|l;dmIJ_HYNVxh;U%`3|skYj*a{DWCGK4v_1*0BX7y z0Q;Lv>$iaV_6~^YSOA1d3qaF+2V}rXg++EiFFgNe((dZ@9pK{*E0A9R`6W^r%)utg`AhhrLBovW!V8*;~7k`Ah(Vz2xg?Ya3Y!B0Z?j9jDg`;zzLgQ zIzT@xlo}?+)42fTV9aGmy%ZNfF)s$$2zw2sj6(%W;}Cw{h)Z-B&<3qmI)DeuEV;iN z4iy`X1t)YEq_!LmT!TCwtQG^+_F=|x(5ou!1u+ieeGf*7HI68 zGBv<%gUMFFj%n4sYWnX8JM20P>A=!s9CV5_}DrM4;t;5D0E`Guz zUtGg@&1c+~tLkZ)%7~Rva3$)oz3AS(&nJf?UL=Ify*#?pLeu9~PbEirkf?<%0o8p( zwd$Iai2laceVhE@<}%Z7<@ryR-8D>q7hR!5eQCTA|BiIEuH*(~+$EdiO6$Mt#H$#p z2S0}ihjz$qTl2l;+ZpqXJnET9Q5i2yTou-~H>qsn|Dp0BUu&V0Qb!TsKsY!}%}I0>Cw|s8)_uu$~X6F1+~5I50pD!h~D` z{6H$jdsRYw2OtU;0CK4V2vKx`@x%@QZo#Wts);dF0hi`2NEwd>5pV{{uwXzCB?cLy zh(jnr-|>n#zz&_{z;+xfKq%h^h)9S@8Sg-@!=<+q@M~I^1@L|Hgt@a5)Iir>6^BwS ze1QBj8$fE^0-y>`zfTYfAIRa5LGn0o4^G)f30NQrr>zU51aL}GEPz9Z0qwAWafJnt zrM&~x&f4J%AQxgnmh=u_y|n{2WZ)ErIo81(rJ=b^EJSBW*;p{hf^~K48>yLfWEVrQ zs0aw41YS||QaC`AfJKbD-0tiJGfDC~(5PC(m$l_3>5KUk|GyI21ClF1VS%I3l)Eva;cdP)VbsLI#lg`6{fp=#h_4^%|m4oO6aY-Ly-Gx{zV*6UqhqL2F2u4ou z-2Ve4QD7(U&s;})44521qU{Rs`2i8B-#o25j5Bq{?)V5%(1As*X0Eqjr1$5^XABd` zJhZ{o@Ki$Da@O^RC%@<4FBCHDs(!UqcF!01;jPIvY+~w=)vYD>klS-dR5=wmFpmuv z^NxDC#;#@e35y_VB=1S^NiU<_OIzJ1>F-%oyaov)eiyU(}RWE}|7VvWsgj&7!^DFnV=!Qa#Grz4e;& znpIWz53Z7u`oqGRCjF(grX{wr_WE~)%ja_^)@#eSn-8idy!z}l(~e|J&C4gRUi-i$ 
zD<_kbi8Ly1yKt-4luW#!O%S~iZj@MXAUfPP3NV;?zy6DwKO+_9zePEyY-jzE{xh$i zBRPZrqxosBvXpmHD3R3R2TE=n2N_?3CGCNcnZTe&4X5+?Q+H{c?RbYa;iyJCgL_nJ zg{03jy8=7q9ZCV4Nr^=E7me1bLY@4A&&nQh-|mb5d0(!w^+M7;E7@>%UlnC`ulk5F z5mZu<<8e5Z8a=Oy{!eX{8ILgm;b}P)_yjhKn@OeNmj%D(+%UAQa|q+inj{n86CY3b zh&1ep*0b*tuQux0?t8Fk5BJnPLJYxm6zlxn_ZbyF%j{6TU2m0&w$|{9M3)@fBujC1 zu@&q2|NnIqsm-0^94x97PJZN_9gr-KLzaeNQ5^680*U)OAPmCQ3+^3Y1ZRD*(E{+9 zu#*+Tp5KWDh&dLpWm*~=ia!4)ase1Yy` za7d#gu;4p%xeZ-jhX`mwjR7|xC8Wk6k&xoU_Av+KAqwz8y<_Or7y=3(qzDK&haWot zk^?%2j{6}XQ73hPG%dJdbB+bG+3x^y2;zmsf1u?7^+W35+6F=?xkoHWV|xeG?#F@} zc>cTc0yv3S0F5dTl_yX!{*Q489S8_jaB&8a8R$^`2U`D@PX1CjBnqO$$B2JuCGiif znEyj7h(QqCK6C)9xGlmrEC7`Rq2n3`$Uz`@2A81P5O-Z+Y@T<38pdP`TL9BA=XOZ# zU@e3&7d1$Qm}kk)w^N+qrX4Ibs`K#V$y`7tgr_RlW0EkVEZAc=A=>0asst%M9E@>T z0UNaDx!wudAzgmEMVJBM>mAJD4hC#O_GFn7PPjtqW}ONCO#or zSjs}X5^%2y$9i`p9RmvXz-FOPgfadfUS*jpuJpiUaJRcqdHi~3ao&__gM1bBNI{#c~kv;)2Ux z316*tjw|p7+558;bkQe(gx~5D+Y~=eSN7!7FmJCy8O>7J`;h`i@b_=}Gi&frDcU`m z>r2e*%-88$kA^d=kU>@8nL)7djQ#_|df5{~!UPe(dNtJ=gCgEVrnER>*Pb3>ReJzu zn*KM(WO2QTksT?y)UET(TYRy|<&uwGqBo7*8oYdqzgTQjW4%m#3+;QiUF^Na4b)}1 zo$Qfw7oW@3q+t7=s#+xW3%8%-=(n6;=#R)fLpT|~j{WswFudh`wWeizP|Y<#?GN#h z8ogC~0FI#DDz>xB@zL52iVKm8H5N@kP#014rRZ-yoaKKa44Nfz{j)j5)MUZJ+?)J& z6RSz)fT!7)x1iw-&nq#lzuskOS(}dpz3OFd(jBf`pWGq^Kb%XG91iR2A8aivSDv_D z^haeCHU_LS&2E33JO1lhPk&`vx-x;5#)-wtOwEJ{#bKdb;i1Lhp{pL@ROVEy1$$nPDnLuL3ryBRH`B9W7oFgF&HJf8j?=vL6$lt>vuL0`o;mb(EeFY)4iW~x+9&H=UY~$%4dz+7| z*IjL88suoE5G42DGu?-hEmZ{xWk2!pa;@6vpXi#O7!190815Nu1tEbUHmArWP zvS|ICt{--u4Z8Ycc5r;K+5NH8A@I-2&pP~)s;EJ)ejVOyky%zC*=9Q(S+EQL;~l~x z1^xoA74W@7s&$1G%#KUqK;U0B9AnnId;#gD(X}_c7_dXG*$ZaNaY#R|?xa`;6_sLd z;CP9MeH2&?w1EPC;C0>i&-ipuzt4SOQcKuc5Q+Gjm?U%sVf#@1c*RLM>=84WcU7yK zllG(SfdQ$X@Ley*`(?L>26ZjAC&mm{kFaioRrc|2ElEtN$)i%ocLbHOFO_euiXt7i zwUj+JPn=Bl2@xinU~_(C`0r+Je~K3HSi5!jXSsfauinEM;ULP4YiPTj-&C$wEnxL^ zIk0*OJ^OX3C|GVT%d&VrH=Chq{p%yerUI;-WOI_0&2c>8^kFEc;bD*O+C*i6=%nH$ z$M>0~dZfKNvR6yCxssC?osD`-bX&bW)P!>^n@&9i%Oor$q}`txlU?;^&TBGiIiqIA~*Ie8lfAr*3=E$6mDug#JlvwF5lL8t~(F-WUIqKd+v z)?V9V7+(X+-vdg}fSh!E31}taz-Em$Tu#r0&vus59iHy3Ao>8i845WVNG$IvdZ6{V z6d6-^c#KNO-9Oe1Fh+(71XDN`jWe1?d!BX+uICmtVi#%S^%;S~Dj@cGZ>>~isnrNB zTVWVa8T(Ez-EDZt1#}HhW*wFKm1$q}sT5w{MYRMxCV^M1<%wLbg`QKn_I@5{31Hf_ zUPo}0ow7Qtc2<{Z^MduYUEp-8YjIC?a0-=o+MBQ7)4OWs@N_ldCocFc=Q>h~!f{{? z*gk>Rnz)r$F;BNaa~n8JW_;noxZdu&lvI`8@~U@Pzp)-UUg}k=HvY$xyYR)JMZ#Sl zxB;PufNhRRdpz0;jD{7~i5m9D!E-I%z>a%0sm*X|6g&QXzX8_cEEFm% zZtYj+7{Gf$aOC8pZJ-isl7RvDOq|!@^W60GPM=$kE`m{fc$$6}vAD}&3{2oFN*95F zAUNzxHZRFLyxWa4$q4#`Xw>9-`ccO<={?}AKBaO3AtScKaLN&Ttb@{yUiPYk+|ya! 
zxZtP5ZBK`I3)_!O^2TrpGdLnD`?y}3(Eu;1AnRk3$&K0eT8QPYRHX9~38!;!yW;SIZU2;H8NBOx5 znO%(%o=Gz8E_!>GA#x7>J+3-6EAQ!iuVwIFPh|-NL$lhAG~?&P$nI7^QH(1bIfyA& zsm^)YuAO+yb5Fhya8#Xoxv$Ry`ZE-wYj=FVJHc;bQ@2Z4EcfTIFdw1gCT;C1# z9bzkzSET8+c+B40gGNFJ1h=Cv1PGaG3cXn9t&D5w>H>Jx`S15`nTbT>S!(eDrB&p8 zM%txK!0{xR|1=kymMT!4dB-o_0?RX!UWK%%Mt&CdD6b9g10+PmwJv~B>g#(@sDvxk zIBLLK0`0zhz;E(ZKq&lp%;gWbN8}L5Cf?o_a#F*Nz20ySJyQL--}4f2GQ789iavC)Q$dTQfFf`9^I_#bQi7zxZ^};_JiY zZn~-6YMwhk+mT$8W9AXXLm0-EkoXety=ot5cOpyS!bI_&_b&zvc(vle%$b%{^TNGZ zx24ePrl_%~Fw6Wtek}H;C}Y2ut5NlznOVUFhso;Z?O#?(JJsAK^ApwLLTVpV)+d^* zwSGl^)`(8wb7gtCzc%*@yotS1KV9kU{rk~FQ?;2*Y)G*Wt2nkf5v-IgUh(qr*!4S0Yzi__ZPtQqgs_${QbFvmi*D-#=AvODDk>30Ja&#{v@A=-ap*{1&M4Qo-5pIPJT<#(>aA1HGeHg?IKr)`@xGBhr7fyoG;8`=9^+V$)c~c&K2@M6RC?DI z<|4HwCv!k*aGdEb*WT;I?t4<{Y1NzSU58`q*Gp?_g^NPnl_|N@%ys7eUcDfu`|>3m zgUZ9JAD2wI=H$E7qoCv8BVBN|B3oRk=cKu)c-{6|A_`>IOVdSCRG*HDkSB$#iTH9=t~|2 zgi(z-K9mxYnZO9+U*x6BNMwWwE{f1)Br(E-p)W-g5RNrC_!%k1aviUFd4Ia2{1Fyb zq#0>z_Ie5d$-w@AbVXQB(gc+brlo(tjQB;V&$1pQIXhi_y3v9@|KjNnxY=!~q zLJ~SF{qQ>_MkvYaVjql`d`-?rWM!eBnLkoOD+nQpK?qv~SPvi8OnDLUkSX4w0t^QV zeFTa6f#`b)Gh>EUkU|oFAP!-$ASm=TBq|@#r}K2S?L$zTo(R}lkMF&H&*gC;aDtj; zUcAX-%&1>dTBy2Bua!kA+{?_T*wgCYqSJ|mPO^J}s=HRsd-zeuamHP%yZSv~cEom8 z}J%2t54juk~?6z_cx7npT#-04_9l{wht)CXwH@mebY-Z_$-)7GR5K#1e($Q_t< zLoU5oUz+ap{z9Y!>35&fxQNA0FZgd0_4{}E`a$#HZsY31Kf~#8(aVgYrN%=V3#7Tg zuW#Nq)R<}BY14Id?++E#Dt~{R}FBERZ z;sbZhrNss!|5#xEbRv%c%+0dHDi*7Hqa6NBD`KNY63?=l|{u zOO{U&t6s`(o=1aP`@FY(;~$UGRXS6?wv2Z?_;cFF5X>`~C8!+ky2R%iHFklP3YTEg z!y88NoDS;~s{JdV*sVM8XZB2|>#e=9PMtW5p}*15XChCp)f3a>Rekc6qU+o)Yb(U) zYoAl??e8&nL~f>`R)(Xhr3m`(-{pX-;fZIyZZ-J6J342sK}yC-!9K?vv?8+`63yra z5(Z&MS6@^DDJ2l|UA0jGk07}Xm-x6SK+ty)LS3RMKYF~;nFM46q~4@6p_VLEx~`hT zdD0!U8cIHF?A9Fp9`hM~+0sj{yI22_W5s4O+5cm5&vn{a%oy7yk4zwi zg_)PN2!F&qu)z%akHJ5%8O)OAGJ7;=)J8coi&G>A+h%}zlSIiO`k+B=%rG-DC_e#2 zSMi;wfAdRsMrbE{RCi@7(T#3oA^vE?$CdXblmPYzhAo(b1P9l2>rqk z@v5R6lC#ee`tPp3r6T&&;wb&H5q(-PGcGdqkI$hMPa%n+h(kpfjsVtECTI*5MlvL7 z9MNYDGZThZ&_Jld5zm!iIZV(+NYp8!?@Ju5-)#{u>D=fY6s%Lpdxn2S9sBJLs<@Dj zPwsaI#jTvpBb=h38ZgR~FKg*La+Oi%_+ZSOhkdD-_OVo52;|vu4??jnf497%@DAgY z_lc62v(X}hX^{v0=Coxd{}t#~^}T^1qk|!1fFWaoA!C6dW1S&mpCRLnA>)oA1A{Sx zfH8xVF@u^h1Ih0qUetp4w~%s;tf{LRIU4?Alu2C(DXe&QMg zH8RAnX`orid`jAZS1u(i+`}3yC}RAr*TcXgqVXH!K$J&%wtlEfGJxE4p#8kWQXT0i zL_i>|osTStbg*B3&-mFhg7hAFVx-tu(GZ1d_^V0N*)ooD7!gwKugK9+MuT6-GJ;sM zaBEbl&TaZ7U}rGh+h}_JdL&%)-IMDbg^6WHdQ#^NtO5nexqh)K$cH?Fq87XhIhy!d+*9-Yl0kz&PBW^!3!2Yp$P73kb9X&}LBX7A|)r5VJDSXGJ z;{)5o7d@(9CjwM8T0Q6ps0yA9_^RC4z&#j&{72#bs#lx_ce9Tpo@Z((Al#;&Q5Q{3 zc(_jeqvD-fqZXGuaDYfg0Ec<8~6_A z4fVmlSbZo?J$|la2{!X4gB3GL@1rQ+-`OBU%Yqq*Q0h{k;Wrj zzQ!f1$4Jxo_0|n3rC?qo9q|%_f4fzCGAeBf#Ny(-iFsJzv^%TWUh-<`mP%#)oBxaV+4yRaGVO?4KA+9Z>D}VP78b~M zPbKj2{PL8%Nd$NLw%H?O=AyzRE$oBm*lu9Su87*-1GVbl7t_MOo}WuehMbCHh&8fD zzMdg3=4CYb$YkPJ8Kc112I1?2@Qp$E<{*5l5WZaq-wA~8MmG!t>vyJm%SX5pwCs;5 z116Ld1ESncg78CZe#FE2G$`|p`4EqqnE*+AT`@w!Oo-f-rG_y-KG1%6R%TX^`}yfB zs7fA@l^0&*4@Q!ZWd?>PP}0!P@(dJEQbLGw5CWzEt6+q#kwm#7UP;23>7k?~5M=}c z27~eQW649I98?&5kf;>ID>WE12bA;)q(1}^pa`3QLY*K{O^8@gigpJExR&g)vJ&N$DWU5r{4euLpe*2#jwC=r@G?H-yPIg#R~0 z#y3RAH^h1y2^I!K5d#v40qMqoU}HiQF(HANkZw!}b^yx_z97qQPdu6~tW$^Jvg9#x zEOJU1ITehY8b(e7BYy@Xr-hNz!N}=hHbz!fcoXD_?KCin#85aY& zoCh-3)gng(>6NFg-KFgp>jQ@ zqlP5m^vjm6EM1RUx$U-4%~yhJDNriFa!OlFCa?4qh07N3$Vf(vx9Ll+^J#BQ&+7=O z%qsMWP^QC3sR|8}Ay;}6KCnzIZDOkr=x4`NtWduA%W@$!Ec`glFtmrx#uK%X$@-Yg zK2~La1tO_*`L*aWZk5yXK?q0SEBc|dY{ z#kkH5>ssu4To<6PjBD^N^E>xiT!rF>qyBt*oTJU&w?Q)3GL|KiAjjl+HO?jLnmQDp z^OomeVsg;p$dcgFm+hRjD*+Lrw{1&;f|xF?W8_2k;kiE$bIe22a+Tzb)ge|%a61aa 
z>|J<@*l>zA-=a+H7OM9}4FLayrnm3!o4RPt=!Fj{cNS4S;m2|vM$sE^0voK@dg#&y z)UmWkF71vmU~y_wY3LOeT<8TSa>2%cdF~oEhv! zPXhLv8}UQem4?5LgA0Eh=PIyaWbRYCf%Zd=G`Es1I;?qzM7vVR>PjQPzt+HkDMyll3hp9q094gWf3 z0!=7cJ;Ymb9=J}AH&A<6n|}FHj!dmo%cl2o8+l+G0>73V;-m*U&*5M=N`q~V(-4qD zmQDd>AHg6H&)rH+7Fy;);qw6)&&0tpkM|I>^hqwXxmS=u(Nwjh*jMIk)MlWS%yoxN z3(e8*Uy0J|SpyCVc{#u|46ulw0`@||bJQZs9p}-zkqj@&blUa%fA-ur%^ovrqfhYA zp=oOpDfZc}Da@kc91?-wPH}HWuHpmQkhABubT1FoAA@dZQo4uFb4l$J-p=tW1R(_}`qpF%A1sRssu6bQ??Es{@%7t1tyM5$$~5GIAQ=Wb;zLsceI5eg zYWATj!B{n!i9ra+zk(G&pNO`}JT(=`sH|ght!?lg)38uP~qk7z&X% zuwS@A3=jl68G`)@f}H}vPK98nL9o*z*y$1Mj0kon1Un0Y9ZXNvAW_$${Fj?Z#m-A8 zXF{TGPU%0zkg>#&vB{8e$dGZtkbz>zz+%iGWXvFE%y`C!r7REAHNl$*K%8}vFcUyN zhpZh`$vq!<0#$hm9e4^=p@0rhKvgIQO|N&Rc~bK zT>9RE9rl{gE)|xqLzG2wS@b!Vfml6&ih9K699|d-c$Z}X$xrVbPT|dI)(m*5hDpRl z($XZ@=P{}S?fBAP-PlCAwOssb)5%~^CTOCNMV?rveg~CN`qXlfNRuQm8)qhHk*(8( zwaPyP5I*)u1|W7b+ic!ZBXc?B=vLASnF24wo#N}YG`Y8OQ1?@lkus3je=ZFGc)M@<1HmCVASoNeu%y@2bj_9#ZpEPorSnsyx z@_KHOW&hdHyO2BYPPo$&avs-Jc+z`i?!LaV3CQTZpnWIe>Y74sdz@vvirI5K;Zei8 zhItIv9erK@Rqa63`^2%CDQg7gV+q5c|Ki&y{bnVIcu}OC&ey({33?3iiQiZ`F{E&< z@L1Gb-Ld zuS(p0f-Jvk$@J{Il4>$=%15ex2J^7Bmz8x^FPLpseDYn%B|d11!ri$T2e*F42c$aU zmU!~t47lv{e*9@$vK9p?qg+Y#$!|tHBIn{NE0!Rap$jS5L^jqSV;bTa5G$9D~j_BKaZd}zgKU^2 z>Sx%zoQn%m4VdgQLKr>oUF__|39D0M&|w>`PTNjTZMtm%=iytKKVKd9PK}RH!TZsG zMBETSX0GLi<{dcrciT9~dw-#Li|h#?g`Li&IQI?HmZl}M4UIXw>>1p7)I*X_t?SD% zlC%b0{@txeET=?&uF28vMge8N%QVh`&Z=8rS`i>(xs-`1<+(GSX&AXF3#89FHsLM& zgX1N(`aJmOr@AjaeVf%k)i0+haPTk3JvbC_8rnbRHyCI&p;!1v#9RDz=8azi3i6#Q zxA0=1bYZs@u6(cBUub>LyM_cx%bTCK&|7*nIfz--0namC>Yz@=>2|^6?fRh$F*uv{W_P;jqOf#6aq@HG-wuq${0k5KYYZXW#@6aKU=9Qi4Z*)B^X4uVaiEIx08G2RnkxM5f-#`*2>98` znFi2)&TlK6+=f?P(SLl$pOmYujNVbW{jISf(+=)ncrJ~{UEI-{X~*jh=f2_>RH?N7 ze~nMue?E|5Wcs?$pXnI$b?8x)=iEeOKm{GjyL%j79Ibm6cx{0WX^GVUQ0YV&rKU8EDLt70`5A_aO{Tb$s>j9TKceIE0HXt6jax-1|-znB^ zOY_l9*d*?wWIf;AzH>u{O&`{?<0T9H(XVRIUBI+c&)7dhYP(sh#6eroNelX|Q!oYB8+~>%;<|(tB^*S-F-CfljM8 zPWrgPWnK)t&k9Pr-W2a3ZokyDjO`fgWV&wpAUjauo0B_2udPCMkom% zC#Ad@g9!++2q-HdLy!iMaIkzCX8aEk@!$B_wUNtDhM7PNbuHHk$pd*mOOxQzX1VK7 z#V5XUW^2veyYgLl3SI09U9S|L)8Pdo6rLL;;Y0<0Xc8oXl0Y&^AWV>Q&sB0LDI_zR z5ylFA$){8#>_h9gb%IJ(7v)UEKpLSBAbsKEF`K3rUhx%vGAd0NjUCtb-0(w_zf ze6jZ%?qh87x$d|1sJ#u}vXTqwDhrgEH=E-Wj1kJ-@R5!Aqh$ycV1(U;toI(fT~36< zq{{U2zZe5JOiuBaNlFlkY_9SBtpBaX^#M+CRc4D-Du+Ppzzs17mhhkfIDpp^ zXTVAX5R3e{7*-1>IRf&+0Bt$s^I&&M0Yc02K4ww>H~1qF0(}4zH^ncu{urW!_RBwxw68-(Qp*6$HHdxaNT4S8M5J zEVPdmbYkj8AMi0z{5w#>(^NmQ>KyUD_-`Z2Iq<+`;$xv*M!ofUt2;UqyxDvX#1B%Q zJo(Zf^#=qGWyZt-4w)!pzVGc@bftReR+e+P{7hX@|CTP=xcA0EEgwanbp+@cp0&0u z$DOwl1hfv{(;xGe+#`)X>>-v-cnWdrp69>O^7v|q&-(j%`fZzjp&*$Wt%d&+x?+#a zetMG&>wfx;SRx#Sp}#+=*st5Bm)f`|f%GKZ7EHTYNr0<^wiWv zsla#1{nAc1 zvn79v*T|%h%;JqS5>Bq~uwrp$h|Hfe@79ZvPS!m3b^McmG!|ps_`}BFn$kW0`pSD^ zx@CT_z&MyN-WdIEe8nbj5SV*y5)O(|`i9b5<-^mG z&Hef|j;k;BMX$@6xv$a$7N^*JqbL(?g%z>QL&w#la(0d17xFz+E=l+vQBY3g-DonK zU2s2A<#Gx4Z4q5{X(tykjTUt@)z?JrijLRS#aXFc%Q`vEa7>bP zmL|R|5zh-3zsj?@NFmzC)YmjLjgHS#9>i&Q<8+{S-lUvU{xG!LiNh&pGe|6hg>}qK zOhDt!sf~%s{Gwsi%dqiavgZZ*@dhQtlnSyz1u>nAsC z$DrvaZ8WgW$Ivreh*$tZ(qsbLKprN|2+bvldXMPihHX+noqtYqwF&5yKA5G%SRsjW zLG($$Ht7ytJwTUdJP}18B+bYOzl9d5lHK$_^_2G!GO0bdt zHrP=}L?0YB^#C^Y5H^JYo5F-mVZo-bVN*Ckzljlf5JEXN5_K+0e$cr&&j%JWOpX4mT%E1ml)&A#)WkI`?jM2N9{t*kxhQ)dNb@?s2PV1fK`-)#z z!*p(UZj>cATdgJ+*ZUba^vMo~jt#LBP6y1Ux!)o3Z55)XJ8s;8ZMuFh(fP0C)l}dX zj(2>F%&cux;vXP{zG11K+PMNx#4S=G#WkFe+{faL231^!9uAJ~$bH6aWD;f%-7zbd z6h)E9)t7(1ie=ow;CuIOmn_}%=bP}S85l~`nVG17U1NHW9xNCqAMRvIInlAU$t+ds z{fH;$eR{g>vy)pX`K6zp8*O230XO4vFE=J-@V+;Q2t6LTdqp|1?_t_8CK3=3YD^hS 
zl|~ZegDeynzZPO~nq}NUpn7AY3Wdd#XkTl-9QEqCZPUr*XZ30ju+l9v3DLI}91W%D z!FeMP{u-Y&nkHOW&m@T;PCzP*;|NZnakU>dBW{d0BVt3zmbUQxjo#tGn`Mtzi7CTk zlQXs&whF4^-Poh-nuLuh{{r6PFX(V95jGE*3^+99rE+OfdL*X5L*+7hO`gMD zNu5hS^GLw_Qe|;vvrv4fZ63^k^LyS8oC;@xl=QZEeBCaSm=oMYGS^?(i z%Z%aW`cG4Q!8lR9DP%BuvD^+#E(U^rF>b9sv10dVmjR7FF{yp#9YF)eor{pD6itf2 zl*zA5Vr+ULq2nqe9P$i1utWYp8Wm@2!v(8J9x`N}6 zAk|j>7;~G&O zv5t`++0TjrO%jL|8iHmgpE&FAO7!bW{f?|$uwu3lddpyO865@M1no+K2GT$Sg;6Q7 zMhq6yE3ZZhf@DWHzcs&9Wbf>gV=0thi^MUao zJW+DaL-!hy5ELkWt(bTdbn@g-43~LuAb%0?_)*V2{qhX3 z6Lc@~ z5y4CuH}0%wrdg}KQhMFI9}R(>`>HEBifF*&**j$_c|*r2brH#@W1IV?4^!tx5^lWa zfCuUL5E%+&`f4R#z-TY$D8v3n&uBmA%bTJilcjefG%^B(57t~N2?(Vn3fsO!foC?RXmxsS94c<*z# z#-IJB0g!&k%J4#-omQ&f7+ApjD2nXURv41)gGNP7tds;Zeh=;7h5KI9XrxeeT!U>kXE2Lz@$AdzkSe-R# zWwsM`j(k~X>1!-PF(TLC{PSAx{Fu}uRa-0rFS}<>1YYN!o%+@1bfDZ;&$aB7bkW9#wd-*saaCL&@e%R2+)r^u^|dUi~&T@pvTZ4 zVrUQvGzbC>B8~Zi$Hscu{k=IoYKlTNX0h+&Ot3zikQZdzRFH#{n{j2c@wxq5761JX zGx79FG9V86kNvID>X~F9a@Am&v{a2_?BJe66(j@=63!4lIyBEo)mHtdy--xKehcVs zzZo?3i_yYLU{{9YFePM@&yPX0?|IqN;N4c$6&+dZyU5-sGLf?tyJVmw$-KC=Y z-B!}ZNAd#Nb9ROA!xVjKXs-{he<`r6x>c(@D8)+On0^t!WwMt;){g-KQQ&pruqOHa zAGEd^X>&Bq6N}}-?CWq>>j@GU>l(57IHNuA=AO43j`lHJkvco@a9f3sW>z}v7h3rebCAL z{7r0>`N6u>$B}^C`36s3l2c!Xr=>Ce$PVqzu-o9M5-9pw$Rc^NU>KD?7O7?c&?gGb zfd3Ya3z-R=>xa4UB(BzbqU;fXh-e(_xuXRv2X8cs)}8_sa7(&b=I1k0W|~RmR_Eb4 zIj?`dY-Qn@^s%BzMQeH_puq6{h2sIPE{#7mj?6rU#MFxuv!Y|uSFIX7hYdWr(jL?E z4{6sE)B^@h#UAlQF&eN!jW`wR40OmlSSm9}c#<#iJF*1=26yb&?;ONzbep<^Sf<5? zHTaI5h|su!xW>9bV8H1D+F?2nfCP>n%c6OoooAcgp4szwU;OUmGOu+nK1_+bSx>;2 zEqK|}75^p8vH3#Nah=QeHs`%`v%smgYW&``$S6z8)&1kigV+B=Q9ZbYZyd}4_nYGP z+etHoNK~2WU#!W6vZhXlP#1%PBpGkdW~U~b`+BNikux7dnh2SggxrJGfkj~{r^%LX5TC~3;h1vaNbGDJy=(c6O;Y}Z zq?~0g++mFR4bC6MIZLrB0%#4Pf+elF<)1)Gc`ini*L(pp@?B3Ax^xt};uN}O6uO?m zy8cUTfpyKmx}Jhf4ivlMKI0V+x@UcVYK~Vt!KuDag~SYBNcj9o_(BpYgI}4{B?eWz$e`4*){14hB|m5t^C<$~V|DDiQR%@a z=m=us*^AbsU3k=*$pPGGxOu0sNvX$Y7J=-ZwjAFW>3J<0&m%BA&y37E2*=8-w9@d~ zd)Ja%9`Ivfk^qy+_i5>n-+FBE&>oqR3%_^#FlO8bXh3`q42y>t9vt822;p|`ey!Lw z`1vmhPvRv$^O(u|CfBayCX~L7o)rYZO%mWA($!uE>H}=+RJUjn7<^P_* zU@7|RwCP{`_hI=w)j0_c1)uL;7umTc?&I|;s`>`!m-se*P^}%<>~3cNlJD~N(l7!+ z)MBt_otK3O6CQOE^1b||eLdtEUvoTnc_blx*w|y8acnN%$!Qz3#+2{Hk0UKYTY1>a^3)JNZQERvk54aOMSLuc`efU?P7ii} zn4Amm_6AF__3)V*w`tJuM|>&zZ+j@aUp``y|2XlL)XWsE|IaKsoBhXf<{A-~oMdj; z<61Ok9D|)aN!H(O6=6LUzaABR=AcLz)Zi(L&o;2@6#{=sQ0@i_*$S{~)4aD~T&?*| zwMCpO>-rViKLFLTcS8kv58J?f6*2LKn2%ZtORxCI)45*Bamlf`G+oB!$%Px^PrxWv zp;&%|825a%{Yc)_6|tX8ToUjn6)?)aOt)AQc^w@%+FXwIw%QhZ2ZKoQ}j!Zah7;E2Q`EG1%_~8V?u~hk;Cu5r4 zpzE5*)sV-x-|hbXas6V?y@5w?{L;oUMO!M0{rZc&a9ybJ1-haJVkYtwOuNQQ zTe_vnEm=3qm)$@Zw#^1ta-J*bi4j5%Gh~tgY5^E;mQ{OaRgbV zUQ63&T)pYTN0s#{J@?ZoOy)e-I}h9zB%+ose8Q%wcv+PNR>TWOzSPwUBJmwx1ooHa zC>6s!r%Kdoz*MJ4hk2tEn}f;3&m$er{brBoY{|4DB$jI!DCIHJUfzU^UEKNNb&aXv+oKcuNp6`kwr$E13n`6lg)Ft|oD zQS#D-)XO>;9OuB&?l=-9-AKK|)Q6h{VZsUs`|WJT4l4Oy0;)t&Ba%We3DC?bXlCj^ z!KkF$JD)3Rsy0cXvTt72A2H6S+Hhvxwc)|cu+Jyj2w`TpAR!5ABi+vT;~3J!)KAF8 z>3N|_RP-s0I-!%=LmDaiQ*KC8-|8hF{|~SI4~`GJ`wtTM)V18 z=M$?nLN^j}jb0yx44J0gDOkmqy_*z@SfwB5@zfIq6H>-2)zDbY^V-0DHUp89VLi3Wsxc~h8SLqB zIl7bv+=7(G&47oca&G6jl0}mnYSUj?*o5`DSsMPvD521Kf*QI!5#ql%!+hOB|M?MN zX7ioL??tC=v&3jmBwBRK#q#Tj*uP=WVpA7?J<6&Uo3Qz7rd3{OLMiQJmN8HMUlbeN za@X50jo~Q`$lrpu{q&nv@glEkIOzM=mslo#MXel6^zNuo8Olt3{wt$0V13)bBk=;a z;0LhpOdS!x*^x7NUoW9W{M~i(`c{7Wg>G6sPGO*@*A*F?*uOJanI7F1lAYZXeaiJ2 zSLJ(aEbVi`0fxp*dmVRWqe`*p*#>AoSF%H|{NWYn_M)@aP;lie<%HHy?U*_rJS5ZP zFM}rEs19lQi(C`5PO zSs8Seue!f_fFe0@&TStt&TTfZ6o|D3A3d)opYajLK#JT_b-{H8ie6?s$^2b@^l5tG zCJ^ap2e5GifGHY|X7Os<$;_^!I^Xo8@m4v_EUs`WH=>?uZj}^m3=-^OlYK{dSNzK{ 
zzg%*tOdqU`z-E|nEMl;Z40B~VTiJP8qT!Yq-rNzE<|2FDHgmg_u=&6GQlgqEn!=;W zY+*#6`gHLovC%X<(cFCZEQ5)*V%@y5D)Y(g`cgEnXG9tY$W&8fS-I&-Ub_jevs!qJ zXBVrBEtctsJGDQw4_V_iY_OAxA3v<)5%A@UPlT(FgnQ}_NV{u|oEkB2*)Fs0q>aYq zaO*Rw%A~LlCtQ|E3X~QK+Wb^ZYo1anP;=7V&fEg5qXEXBy69jvb1*9^aSM)T6$daD zfr7FB1j(Sl2^0+Jddubs1UQ08jq+)X0aAlVk~>01lr9>VpbSL}*51a}`glvf+jb_S z=D!91jXekq?izf|!W{%;A#s2o>vI58`vV%c0aX6=39sst7kyqQ$!_PFv>JkO3<0hn z1vS|~O$|`9_ygKlL0e=Z6qHkfasg0o4a%24pd*amq~6I jM+A@uWoevFAtg+T;9 z1!o0{*wyTop1uD}W-J<9#|o}f1+Q>q6nS?5coJCR0vK%2i-8s=)=&QiDAI5Y{{dm; zS-KO$`QW8+9~!Q&m%}2p9IgNG*lGA)=6gv6+E^dZj0MA{I4)2S?*?RtDZQ}^PTN{| z`}LSJt(1#-UA4r3pBsEkaUAJ-yuyxmJ=(4G`HC8>n~-&I-Q!Syh`3D89OZ{@{k zW)rws;_~gZ#qbKyDW6Px#iB#}_tmpsg04z$Zl0N9w7%de8Le~M>Z|jCMmwD|cH%g7 zo{&pqBzF^2)#{N_i6lJ?Er<0S;m7`5#C)0*=3f zbfdpaZYj#iJAr9q<_+`4%SvLt$N3wqL6hd6mKG{s6!=?A+r8`FItAXGo9TZEeExvv zHF|agm^_2$qy9f?9kG9v(Ii{x_4mjWcb-;eca1N|+&T2e@JJG8z5UE?t5ld%M;q;B zR3a+;F;Y5kNB_WtH! zxLQJ*nq4Qgyj+83s>ux%!TA4*9sPNbnrptD9#z4l^JBjEN?Gi$OLt1KP4?0`S(nw| zQH&Tl*DxP7Zg;zzQhI$-e>4KDV;9T&5-{~t_5s)}3|Bsx4M2a0y zNYUth3)bi*DV=iKkh`T{j}y1Ie1NeLFK)k^X8KICcUXD<^LNjj@MObZ0Aor1#`b4W zd1cC}QXwNe*&s^XUPpP#NkAnlJh^`A6Q+|Ytq2)t6{XqxR%XBKA)Q+9Tebn94R^op z$LHhVrr+Vo;m_QbQq=M{au|n`9;B)D4v+o<{CKCF6lk6MX!h&G6ljIp==ba9l|TwM zx7OuS?;XYh{XA8fa+*{H{X7Tl@$T1Ay}NA<@p2af-bYj`$0Okr#r@iqfU)C;x!7Bo zE|QWXKsWZb)qTw)>feTop*q?@`Z}`TTNa+F1H{AcjVm$ee!q%ea_esvNxX|H_08K1 z>*|@7eAt5z?FpG-`+bS8buI0;-_<;@&sO)f%YC7)sEV^6ko)s~5LiGR{|BdeD&0CM z1sK~AKkMWf(V{(rcl^yX7{@W1rL!s4RXds4A0)q*J!N;gbLLab97uqMI;_ZK_Z=+b zVL6n?L3wlgRBWwl3cKuTuZmv*nM0Zaw%VD%pQd)TFMx<0+E{xZzTDwp9swqkh7IY* zU6-5QHXa?iTVVEv#NuW}?5ymFj{Ork{U)XbD1EH?if)M&S4At;=L;FacMyLX%**lb zwhcSnJZ^*cnin5O=}(={)E0e;)JKzAJGhpo)@0YtwU#$fCmF8uW#DRfT1DZc94?A^?hZIlwX zu6TBTAm{U@EwdrvJIzHok)~VuLEJ7fl`8K5K#3#2jj|0m38d8!FNw;A(@Ra;8!YM4 zP5A`)IM%$QTb~?GdvQS_wx(f&e9>ufR(9O5Pv$eaPr7nF9?>V0M^n#fEVDm{v%D+D zbC0PXzuUunJjTX7IGwm2K0kb(7T1fze&xiWxwRR@brhEXsn{BN<{e@zBKG;%vvInz z+SaGC)Mq^(!)wAMRC;9~xmnu6^50^7u1Or)x7cQ?oJElW-GW3s)cJi&t0caYr3 zOSF6*tlC&q5q2V~|BgV(!5%Y0BN9IC%0I%2R4uB(dB+GXC5h4}^fN~E@xeB!pw2{) z+aN@W0_-m%^!fu_`Xu7+JfiGcfes3#fJ8+g`jms(Uc)w76;|1x&cEszRym-~PawA; zh!jQGUw$gXSD@k6srRxV=^Y{=`h$(pXTdLDCHuf@n2^LVneim~CkZ!+Y?=Vm#S@UW z#6U7Y74QG4+^W9Z)lU35rXxZ1r`L>M-d%!9h^!2+GWT^i8lNelT8!5lE(?fqQ;8+9xrC_DBul_=l{OxgR>JO8PVm|}8&j9=o z!Q8}{VLzq{R&6b-46rnUEihMAWP6asnGeXTi~=CObOb0AzK7Qbjwb(k=sr!w4hCc7 z#NjK@lK!u{D^p2DjV>_4n@ObGaO2H)-e%REWnn#Q_0iX}6+!7!XYp6gg)x`R>6mU? zGm8M*ZHW|jt#0}I35(j9<@;4?5I2Eg*!Uy;TdU-s#@ZGARW6Tx6hrsj7JdAE^F+In zeY`}=Jz2L=u z4eY7flb1~1eK-XjmS)7da@v$(hl~<=1jv9)&clH4f)~{D3N`;%h5COg^9tIO%iLxV z?N3$S_!U3?r=SD6Ve;N;iKi=z{7=AjyyelSSI}YZs1?{OKmERwZ;G4fc@F2B)jWd9 zxE1v$?vHp{8(Cua?{v1bTL79*%5SURrv5Ugb;$n;Q21`4TmO9e^a)HRlLvNMBE7$? 
zoj-lmUQF9mAF(46nLoOcoGI{;PF;xJGx@eH>5ZYy`}WMNmX6VeH^z@}t>zt){tWaG z;W`OD{b74bU>RX^xo0d?vuw!NBYHV668ZXSe)5<%ec}vJP!AhsvF0hKZtPl=?d6;h zFiHbNylfkU#JZ!WdQRv93MvQtP%zUc80)fcdC_J6oG?k-5C=!mFJjtp%8ErDD!JQZ@gEqDXJnVqk z{YfBM^Cl$>fCHY?p6G|Hl7H&*-6%iA<5lb0Vtt5<+c!jWw-*G2a_<{qgie4x@rniFj{1`!-mMF6-|VGJ^b{xtC#to{-J2*-ZA~@x+qIzWws7naJ!))(e_9W>MnR({G|T2x3B+4CxeI# zD^!I352(fr6^YP_O6DbxN+za=Opc(4O7?gXnM@3hML<(BYuyWV3IcAhf&TOp6+uV6 z%}rCMQ0w)kzJBkur_;%-e0}}*+MAntm=v3vs-H09bE)zU3o-u>Q(qZS#rOOT2qGbf zbO_Sj-3Zd%T_PaWT_OU~-Q97y`|S7kfAhRpX6Brk&&)aO-Fxot z*_m-;cv^BsZlv4l`2i7lDnhhv*5r@Q18X3I}0l z{>&f4yqblx1}_YmOgLG=O0gozLaaZ_R$~m$-HdB~-Y{uz96j%{%>t)PXRgPQ)9Y94 z51|y~__>A`lr>}KvobZ~d!%1JlxI^aXH@%>ijFMT(J-$mLqR7v_XH-5oD5%HVQbryHzZ@xE- zC1a7+puKC3Q1~HOKkYU0m6qk1n$q_WuS>B8Me<`|!r3C9Z1^!rk1V`PC+3p0^kdF_ z7|9lKb^jLy?RHa#!fQPt#c)yl!J(a49B==b^fO)om30E`T^|n{=IfEJ{=1uUw*EWe z;h{B{@xGI1-WjAWGjsYYG_m$h*KPfzaWtwzr7U<7kRlejv2Yun)UveM%sQ;K6vY>F zY%t5_45c`2mLp0a8>07Cd6QVUcqy-IikIkDaWx_<^ons3dc}A|ieAK?vpvLOS5xZ$ zP{GAFB&)pSDJg&EVTix$TN%yGU?=mKrBFKW$XhZ0k@o1eSyE-6N^B3d*Nc3H1`Eky z{vN?^vTbU>r=0$+?M0rjpuVSc+9gavsZ|`Wv%4!$o;w z9lj*hpMM$%zyE8)^CZ<156eSdkJ*0XM2#3=OPYts_ge2=WX=J*%l%Tl%-3KfV*gF7 zVJczgVJgkf;D?o|emE7qewcu%UJe^Y|1&-GjjdU*ogYY{BbPxo_;B6gHfLc zv(&$+mqn5qrXocjrmBl0mUvq~YzoFM2F@(*HTipV{p}(2dO2xn5HE=3?Nc)17s@CY z0SLkVIU$eQhlV4R#iazCUTJWEJc#r#9& zJvBQuF(D-<)o2dJE@wLXNZ|m4U2a3jQ~7Ub$J{X1AKDN3L$hQ0+s(<;}zA%&B?c)Npcd!K5{W}@qdxX0p0JRLh=gZ1;b)fgy=Ec%OC#w~(RP_*^X=xMff%EBnqY2l`$Tda; z@(&M!khE2|aoHoo@!l02M!dJAj!af|5JxS>r0*tDndZt4un@?%^wiCQ$9njPEY6Dm zGfL~{|1v^(|BCemt-nS*jY&!Z&0$|W4c4es;)2SD#C>XN`AXHZo;U`1nywI|Dr!dVisZI0qCWxM{2r(pOc&g7K9&UNHg#<5*tC{DL0JX=O6lG1VPLMD+N`en zWNA&|5YGpZYP#}8#C?L19JvLa41~JlH3fe+Ra~k}G&P=CQ^xqmTkG8dcqmQ{lgx25 zHu7W!h${enR%>@h5J(k#$>b2St5pitTfi_B_C$>mwSU`+Uj`3A9|>Az}5ip zWl!f=VKrEHh|T4w^VOQJiCGC{(buhIuYDG3K7DZjoBUqtWZg;0I5wEorHE5}Mvu*7 z9DYFkY`u?B8pRq^tf-FGMqzht1-W*pdAewpxUG+hJH2#fzui-8cGDGu1$VZY{x;6? 
z34OM-=#qce9P{ip>Gj7+Q!LG2b&C7hV?p7-yT(HqI1*8QEIQG`z1D;`EXw^ct=9Yp zb?hi@f7p`a1HL7m&h^;b-lj6FG1D!0wteIL|ei18HB!@=@qVT`KsD4SX{FYHSzr z6of+f62lRbz=)WogqhOS~ zAE^=udI5;F2qA6tZMN!f6z z`Ng*Yl>6=ws-~=$M2Hw_k{0sFD&0uM0VsNi7}H1u>nN3tl$70=q0zr?&-HRj8^ea; z)j1dYbk#8D?w@hj!Izb$YQv+8i#aXFffwOO48P#q?>}w{ zs@*iTW@>8l9M)rc1UT@+)|OHMRcPa|&%e}|nM(&Ylt+o&VP`YwVb)~!u}7e2$y4Y< zvpI!N=D(5b3xmE~b*1sRuBv}|hguf?UB&gfDmiA=r{a?)Sw7(pmRr2wd7R6U*M|K` zBxJJVd8?C8FQlVo#=b2~sHsO%=!IXEO%k(WUqyIggWXx79xqrTvtpHjvclcVCQJOD zt-JRwkmU&}isgy)rT(oLf&Q%uNJ%~~ZTm>hEoH9u-F;`#Ex9}Iq#P5Sq#6_Le?Qhq zM>%F3modwA&vwAX7ek^$1fQp86r`Qd6M}AW?g9bp`94<=Q;S_9i@9)h`K=gR5_sC= z6-9hvYAO{C?b6T{cIgA;=qm1gGEHCl&;QM5UxUVAaUAC&Du6xxqzQ*K3$W&hz|DUT zMsFPaBVN2>$A{5B+HKZ~F_z#UdTKq#K_tvJz%Q9+Rj*6PQqGl?UbVeB4D!7N)*${p znE&N@nAPfFBE8^DTQozqWq-HtN>mg zzLkM-j}Ez&QPyv=9m;An)H?(WwxF>9M?xcJm1PvT@mJ#7bw0Bmw=!+#2eg(qdS9t) z*A>iqa=L>QdoY&KSy~7>x;wk=-bMeyk}+$IW|8wR;vZMz!x+*9WUNS29w9+J$mz3YBn0COaxT zcWE)Sn^m-bR{1Df62@u$oKn!RSBlm-hFZ_E+qGvc;a8bTTs)#IO!zj59m6M?wn>C5 zrXR-J_VEo$!M*lyK!NBIctE-Y^1QYIPO?jgb_-M&?-F8Eis={(qex>6etHiKS&pvD zfN%L^aJArG0`pqi05$$LAkTCO*q}T>=X@f;eA$l7=TVU`NP7^>)Sfxu|Na0@zwpZt zJj)wV$|PXo{#xz^pEP6`kKvCta0;~+_}ZJoDu7iL?qrC2r8-y&x1PL8!ZTc*>hLmj zOTWm`u%TV>{D za;jA_EqZ4=9=T=Hy zz;E&~%?PC&|AvmD);MB572c{nL}`XyeyQ93c+0p` zQOmxQ^xVYo;~T{&z}HcgZM8dQ-QkkLtp;BZ-=n|So3nJ@^T67G=KRgm zOmzdxF{15sLp-)P1?O{vMkKVA8rYRtNhV)J(FQ24z|&a(q#yN-Q7w8Egzms4rC zjJhgSDK)32MMg`FaZi?8CML#Rp-*m;cJOA#hL*KI)?g_DzR>nndmaEf=}dO&l|cpQ zSG@+(?6DteNLNy_#2-fULI~py3)i`KH2*3eRf}OS2#{2R8)=WEDWHz+~~hZCc#}Avz%9j z$T=S2vT8o3x=lJXj8He8Y|FGhdd+J*CkgMOejY9lRG->Gy?J2xyM7c;L00>oxBo%X z-^TAMWm@s+a%m;oqvRRR`@o(((KchgwYnU;^k{m(W9{a+M_ZuVJ7c{Ys(O;PSTeuA z9Nr&4TkT-sO5HxE90PlHpdGDgCUc|Gjkv%}w%~iM-DY}|=);oJKa`8UzUdNnE-GF= za6_`3u}1)pJ6b8gS)+gAnV%m@h&#I@CWKs3t*zB2A3=V9khf`(R$1`*KA}Ob*D#joQX`ZVBk7^#Z-?{X8^@M4CI62u)@;9BdGhf7hAXy_jL|;{ zxs1pzo-Fz<5L zDO>8d&mTe2)`^I*>lRmQRozNbi_;_J*9{D!niGWon)e@OnI0n%#a+}n8w+07%yiWv z$V%fN1kl-`i3FmA7p8XO=>;&=BIueS)^{U)2}IdP!Z=4Ec#p}_O{5pVRf}NFGq3tS z#sJCQ3~{lH_WZWC3IXACUqcPtLB_fG4k2eDen$|6wBB>9 zh39YJ>WkzV85U%fa@zPvMeRGr7A?1`XhrS$X`cW+qn~YQ@H<4iI?6FSa?<+o&05Ma zC6I~U02v#Q>32r)uvJc|qf{gXrR#W*@c@zUM;78s8iQWz|NM_(>inUmY*GC)h;ts7 zYtSf;naEXJFuE1qYcYN!5XL#LEHY?B=uG5l9A2l3bU@_#ZpvcZEijsMJ`n71{T$tD z1zIS_bM6XB^rYMSjQN}6J1~b+wi3BIWJGe#PlC*i+(5cLwKujV)}WE_uEn^v%0T*& z>jRPNGXuUwH5SMiOPrYt^`=)mpbr||?SKwe@uOR-M~2ev=RoFk0<;G}H^woO3>k$evb{YS8E%$DBf`OmwS> z8prNq6F6c&FgjODFfetFT_LT(^dlA&i)wRl(sZx>g9Uxj#euM7gGRCzl_^SKcrgQD zFb15vi!UvzK1U9uGmn9@iU9ha1EJW5iCn9GnvJ{VMRV$2tELBW?nc7VG&u*2`2RB> zA#xQB1;@Z^F^>O?UuqNGS_tjciUC!1)9e3JMc3pXG&0S<#{hEpCrOn?poz5dN_h^sbPE8go zhO=TWr`eE83EKBt=!8T!L2qaG7_847@{v)~Ti@&nE7&~ux$KvmCA;(U51-tfshbEe zkG$h!@*Q#@cKIRL{xGSyxl?GGC*K}XS@nQ1`WdfT!&+$X2 z=fQeJhOsK-?+WMRcA3==)y5X0=QEr6KR;HmKIUC?`aX`U9u-dMc*qO*_^$OhOdN>c}?;=k+Z)shvpYQr{N!pp1xY)9m3Zp9;f?RcM zoWM#~l4^LaMknqZh+PNnZa0?+POGYa35YW`?^sSTTjpKmWK--|x4d-=nu@!qbfvY@ zOoN^3!aHUx4r;+te2to0E`6@dpC<`Z@Bfias-QShxVN+=uYYC!tt!6yfoR3d%tMw? 
zqV_&Z`wfejcf3hnsJ+W=zO~|dI+uh~n|9q)r<#juFWmbr!9r3kH21|9BJ=rblux64 zX#)sl)H(r7dL(ljUb8Dn)@sl%@rc>LCbZ9?-s*WknRKMQ}&Cc`gb5PdyG)zyD8N0#tu52dcla z1u-cnuHQ-&rv^}g)C{E5R=y2yIktrPNVvP6aXGe@m`S$K3ebpD&*KK8ojQ@n@8`g4 zet!BzCwUC9>Diz3iz*f1388HEt=d5J8UHnTH$Hclu<8~8Ssr6~_M#Cm%9>vK?J#`D?KvBC6E>@{Sq z(tPEQY>DNfER3FJVT}&{huao5e0}_@AN<4>mI8`<4@0yiC2`8~iV93&Fi3uZ8jN18 z<_5wxbsrbC^SVYb)qWXpb^~wt|KsWW*|K@k5afWpkNXeGZTkJW60Tp>exGJv@9m?= zESUwxH@dE0zieYa!9Hp7r!Be(^XGe#GIZ5exs9(hfSmUDsoE+VvBEma$jcH?`1%kv zG5HfK9Y2$naKy$De<;H1ulnL68|)K^Lg=H0ZCgnJC^d2C8v7TCH(zyEP|Nq0RcXogtJz!vZIJ6EJC)IP!Xx1z2cqAJE#FCh` zfG)JlVM{|MT0x$#G-Z)6|3Lf^=0j6rX&FbvUFX0k3cLv}K%Szw0$wTuDbH7NieVaA z#j~^5^_M_?{u|)w2!bT}>jtdHek{~HN|Tz$vOQZ%D@)J z9=#=iT~J>ax!-S$o84C(i7!l-e-qB)ICW320X;mv?vEAr7}v+9E}9;X*3dYk8nKxl z!o*_v^{5y4_T|u0DHN9s0n(4>S<&8ukcng(6@%U*&WREW5>;FD?0Y8VKwQWGo&X;c zGW2iID^=GwAM7O}J|I^i(!RujTfKO)M(kkr)E$+CiSvmH({j_(^758f-a~MnfXA@= zrBlEV9YP-i5c5c`R(fl0UzJUqDtVFb03PQyul%w3i^?01$iNly^hGHR9qU z#OF8A8C3y5^On^+ItOGs8+IpO+KzGE5fWpMyoPM$lKHpZR>88L^%H3Q?smer3pCvgJ}UKxk{duw=8RY@UN z!t{}AlcR5vEra{dG&WHJ01VEaUP!1Iy$RX$hfch|fe)0$1&>8eQoBQIb`@zOYnzvK zq=!>!UDnmU_l@3efXVLS^2s4PX^LdW3$M_|Rh6hJLO(TM*RpGm3+KMHoo)#Y#o)kY zQ&m(c;-@-t^*V~9ADFimD`lU==LSF~rvhZ+q>qO(%Qp`@_@#|<=+Rn)oC?D$uDFnm za`@%?|9V{nvA#g!@Aj{p`t9=-)hK5^5H9u=<9KK>el^%WJfE0Zm*ne#Rn=-kF=r})smF9$*2n{_klAGsW zF%dZFrk&yV=QJ3?97cV5&kid30jw-aqcdJ(nCTwgMqE?TzEC05k%pTe6798lBaOm~ zR5|jSr+4LF$G$%K=m2nve5Q-_iL#xiA1T^HVR9s$U1D-%0g|{)=8J4-eN0g2Ht2)Y zG^t1X=_r~QWYXC{1`pBDI-04L=oRdh;pcu4{F@Kh1n=ggnKJi1>7Kped!K66f0uysZ-&b6^E%;szkWmo z$wzYWpo*${X|9{-%6q?`>XL>_hKn~4MqBE{nVCecv3LLl8GjejV*pV2L;$oYJb-Bz zOf_l)|NAjSlduM8Bnxt<9;505jxR-7J#WXYz~nmkzl{6@ub=`JG8zUhkGRk8;$i`O zb)ftNFlUFcx3<8h4CeLoV;e#q)@7?Gdi)dHz0&57(9@TU^V=%^G`i7ZOZbv@xO-AH zo|B4>1){|=y{7YoIPAW#XIB*LeDct3Up_hs7e)=T$5U!&Q{y~|6h^hHu;8(}=h+My zd_5Kb=vT()W+v*(z-r2FfJX`Nj))8icLR(C!WaXjtX`cZ<|bxnDL}W>pVu%y++6|P z`P~vRz!Dk&X?PDV)mYbdX){Wjd8$00ux%Zh^;Qb>$=})`IPKPu{yUzaz)gq!d9h%K zy2Rbw{LA93^2R*9`LF?t+Yy(`HOh@lSGDp>C9|OOL!s+}@`yCV0_ntKN8%e^i0NxFI9o+(Yi|fP44Xk zN;w1*`e%a)pN#oEnoMX->sFm6iXYe7BgBS`OJ3#yEPSx@`s?LL>O!_3h)eq^F`u^l zJr?MSu(_v5yDYNY*SdMG7W*EXl8M%q))ZKmxzh>u#M2B+Vyz8Mj2IimD8v27O648+ zZg7@VR8p_6f)GLT%1ttb$*35l9&s)EC?UW|EL|v!xZ0zF%vQ z7{85UzS7`-#)mB@?@Iq}i1k zY9!e^SIe0zh4zp+;0`5`k(>9?I4w*Fh8Uk zd3Hy^DF)4)U8^-chSc$9O`jtr)z&ZxpWli}W@@=lKk$aIyt58F&X$&&v2tIi=L{hm zNpv2>bZ%LboeAH@KqxQ*-XY1Z{)vkequbjc58v z1veGPbmWp$=)+r+Op{DjXH2q1c;quLQ{%#E`s?bG8jRn0oQ~JL6xn@;<~dG=<2epw z1PsM)`>c=6+MV*f;@>pfkIMb?53-hMWJS6`rH7m@jT;>7K zzgth#x_i^iQs&b^%<^Kwst%JUu!n0JktH>-$yzP$mqi`s?9gR%`BWi!kNc#Yv~1Bgi-@fKh?2~FzaV%Y zJ}H14+Kq%4h$4W55syq@hQ=a+s~13Di=b|XSXxG_hC;B8Zi1k1hS;=@G7SnCCJ5$c zh=c1WYBJ@>f!wtSwq}TPYEF|$YED?V>Q1K?>Q1|#WP&8#9ZnSJ1xmihzOf<-_P!ED zyV*6vx!J`;yRo9jxv`22@z$g-Ts{79hDf09^eA7(4+P^gAX9+|L+E%&DP{cZW+>=^ zO0!vdK}gdP-mb4f5P>LfSAt+~S8kAE277C!f_~qqJNeTTtSaHCJF&&9Io)=H){SNS z&$US3CD?RwwfaE0vy2~tukM7E55+bky0H=_xS`W=ctKONDl(bB`VoTSDbxAD3S#2u zPf*gJ;A^l(6Sszm0z2)&j|%{N#3S-sp!Apjkc9{kfDi@8R#hIIRQUY} zq}^VfWr*>WXXirVdjR+z1<{+zzd+g#cr$BzD$D$YnY%boMMbnh9>&v=r})(g>sE`f z{Z?Ok^P>wEcW@t*GRN20t;cO`zT4CsZ`2*ZFw{{tc;l6k2?cX|9slL>9MIodVY;WS zJ~>Qo4S#htzC@?H;!Rh%*!G94-P2I-MnBJ)m#zy}hue}-*M2>jqb7aC=)`@nR*2Pe zvi)Mr%Wb9Y>kZTEv5NVh1cqnLY`c$Z{Jaljkkw_g=BV-w*~#5K9oXlYonL?Wy?5fv zoq@+kx+2J|s8dUc)8aFRF?WsAO&6>9^uubGt>Xqfw0hvdbvHE)#keA;eY{F_obQ3v z(A=?sabni}(%$OXcXB)L%M9x0m+44FCVx?Hsw}zg=?;Tmz^&1p4@$9b%F8jH3tGEY zCXU)}QI~Jnv!9!*gIhCNx0a}}LeP|M|8 z+3|I4Un}Nqd*jLj-vvp0Hw~71`^2_+vmy9%IqZ^6iI@!hI?o{sEFY0_~eNspLHIDj;vQ{b>YU9)aQ7zmf{YL 
zGORgm7x)5b{37%w(U;HB;1S1>e75vC`OLakt!pdv@A?YEnscAsxl8g{i#e~S@aWwL z*{FvF(WpnR)C1gweDsd3rAP1sq8-}X{OGe!GQ?}MAzW}TZ|q?SOOX$#$rs`oYJ<&x zilW6oo*}ZT4FY9S#1O?eE;z=!(R-&GhAr{wf@r9%3^5RjWeF4H9`*bX#^s+zYzg~< zJ9>|Am3(1KmwZu^p+4veQaaJmdjzmu_dxVL_UQdbPy|4HkD|MPj|^K`N9u#7Kkfn) z7`A{aDdM&9XfA)|PSj*cV(7;(E_gi%`U3*s=iS29O$piBN?IU5Dd{GYu zsSHj++`ec&Cx{1PK)_hDlo__BKsv=;5WQwDL!4&`PQfx5I~2_l#tb?uperSMhi|-W zG8Z!s>sO}(tPz`VLI9;RF zw}6so7%IuoXKUgyO8}bx9th}tUTcJl-zEZ$r@+da8&g;AL+JaJ6V~TzkGasp^k&Ix zEbgNNCgrjU4>H0pKfmy#=Xm>MW%nJW8(%{*j7-iP+%2G-tySWM9=aZxKN*C_$*m_3 zMXpk_DjZh|Dw-VIbB#NUbp7VI{9+<%_U%f;9}Ql}?st3&J+g0(n*D40Tl;Kf!RF`D zoRVpcMu_|$f7S9^pMCXGGz6+2WB~$ige0~A39{jT!MXb0GMj3I!$@y}14CFaX+E2q z%@FL$Rq#tH@|8;X z%%qu&?Vf|AZ5SR-_S+I;CvJsyCnbiH*nRcI6Q2JmO-9aOF;WSzDzu+?C%cb<_NfQz zi=Bm1w)BVUi!sv$zeuZPZ3)YzZ0EJ(W+rVUZReHZX4ra*<%N|OZD*>-i&vzu3HP`} zh<5jx5u#Kv^UrVR`joJnWRhMXr#MrBNsG@U#~1eYha3!zl49pvUN7I6xWM;-^u z+6`LL|7qF--|M z8KQ#3%u_;Mnqg%Q{tiCASI!Rfi7S}7;zBI(={i|^u#$)8|>lcIlxot=M;nV zn}iE-sjme$6x&0;9-y#Yh~aRaMXTc%_ABs?MMVH?s9Z^!4)N>Sr=E8HPDeGoVe#7a zzJl}}a#sqIQ=AsPEseXDWCD!YPk|(9Y#wYUMuXbqL-W%NW_3MnW+}4j@z3k`W&>51 zo;+ak(45rI<81VB3Ks~BUF`@k>4L2_!F{ka^Wp84_u|_CFWmIv0gAg=&-U!Uv)fmi z+0ev4M7>h#z5i^l4=yeR~f@O{55(ybyl^^?&on^L}nmP zd5Vl6H#Ix?@uM(67zGfj1MkReUiCgYO$tz1eja@l1EgJEon1UqK+=AzxYV6y~+#l1GKEOr*;T zuepGLH_rxui&msaN?>RY-6j+UVY>8pXz+>wtA`5Il)=QQ7+H_d=w48O|HjIJ|6osH zkb;HB{;xew|2*p^N~ocJE?5~}#^1Zkkh!)@2r#=&MwEQ?UO8V-av#Up z?UyWlwtALyfSOWrAEMfY#XNi(a&Rik+07;tX2;Qr0mM1sraxH=bz1CTrdxnzA3zv6 z41{5T_w%BLfp+x&N_Yi1oWuPtaomyh9}v>&0@b}t_=CYof^so=iOao zO~lu7u2jj(kUtx3fU8rrphvPD%&ynT>j>P0o4&GWRS-R_&g-0+)7_V?&v4OE3~Sof zKAG=lEv!K{-91G| zLA?^rob*O-p!@kWJj+z{$DR3@9nVd$&7Xdw`B$x%>*e;zl)_=7Qn=QPeW~9^e3$K3 z1uS0iFFVnRQ?+uZ?F(kAM%wk$8prG;`Mqo1rgpf;+lw+9^-iVmJ3!#KQn#nQ8N;|- z$o{FYIOyl%Ev}8TTbVFK9Lr36(6FskGoheU!|5`2V&SW@uf(*`kZy5f&ztV%>519#q3mrEq5j7lV6ZF5#jeD5{) zivIqX{H0@oNsowM3oW7U1;HS;w&Z6_b=r@EI7|f*B1{F2WK0DgK)MUkSP>>l53H1F ziN~`(0V&$+pdf{PhA@S)z#xTTr8D#6g115}MQTK%M&X_%abEZ8`6H-oi6=9vhaU|NalZ%o(%0aZPDAz{-U?&>uPAP zH`LMoZmOZVmELhos-y94s-taG3v=xLAG!S>S$|O$(lot>+7+O%??9X~Eryb!Rt>t1 z2~w~r18HiI0)rChJ`;2w0=iFu-^iLrh=diAdLoPqBsQ`x-bPBT#)jBloA|kDhQqn zf`@`MUY54sr7XlGAAD$6bGpsN0PA@>AOy&k&|AfDVX?dc46!4!MBoiHWht?*Z)NT0j z-JY{Cp2qz9GKH-p*Eutz2j*(jI70;DAB*Ya{iHNX?iU!!O>=TJfH~G;IAp&BxyJEJ zFVtS{MGe2TDQp`$lb>{YY^LevKD^6(Jk1Dg=p!Jrd@!fi-x^CR*~k&Ml*JTep%rUR zS3Yd{KyvU{m%J>Im!5taPmxvYSjaVjo3?3l1q}kiFQ2FBXXpXs_(tXMQ_qoNlhn zjFGM;(5V{HDtlQ(@_Zkkik8|@h_&>8^D4L-SAvLTZ{P|Ah2W-_czqnqVX!J8NJe(9 zl}KqAAGDrGN7TjRsT9ibnmox|DR}5J!=H6d;>p=WO6B0;&w78}a}h88*Uu03Q>uXd z>HqZ~{?{b@Uy~}>wD4ck<9|)!ugP%<^Ls2dUxJcR;LErOVYQzhjgS~M<3p0m8DAzu z_`J7tDIi!5(V*L8O6J1xC`gWI!7NegcVLjjng36~)}UK%CR={R_=n1_CnuguSwe9c zoe}rc5bN}NGTI$0D!-&oH{JlBI7#%b99b$uKSkw^V1tB^v)ae&|` zU#@}sA6AkE?PlTZTmBLdo(c42%xEbT4MHqwj9w$sz3cX4w9JeF?JN{BG@RF5zFMQl z^q2Gm8ES%|XBlj+CNp`N&mb;mYm^?Pe?##2k$gY{TAeUjQdO%m;yObVGF~C8jeax> z&?rsgJV5v=!H64(3^uZWW6wuq>3&h7fJ{|J%YjIcAEZHtLp$}(Txt88tf~{t$){`MdF-&n_i$5A@RBq*%360mjcb+ zfo9jB*?Jgg_5m~_Dol=$z-d$^By06}pXY{7vlpP!U61K-?eL8B>i5i63Dh_&`0N&V zmynx~Ea>~m3fzCG57sDu23~sy>i`byHxyf}^DLK8y|YYbTl2=C+^H+O%OCPv5d80e zw#zsF``z)9uWvk`z3>oDQBd-4b@2-bYU&UhgHdyFd|wPN<=ILLf(3U(E_*FJ0b(JW_ZcQn0?sIbCkO zeZwgW%!_=XHP$lf(4PD4D&LhEluacQ#U03+HMFfF=BeXx_ViRUJbbSKyo(zLkeBm- z3ot<{vnj0RvzKPIRjd1z5wGOW$0_eGjpYe%v+!B8Rj*U*`h666x7z9r#L71NUl4`4 z2&bodBe+w%+rlIM$DiAa8^523so4@(fKCv~tVdLDX1);gy#RwClzsBAu|6w(@c~2` zSSo-wiwn5W6Z#{bCuonLu4-R?P{k*VVTQm7i?OlT`T@)yu%x&wT=yS%aaI~@TzVTP z1WQaZtx%l$62Gc{Q}^pj1r2N7W9hePhwN|nVdTdNZf?azXEL>>lNg_NbhGM*xa=B+ 
zd_v_(oP2dWEd;lZqXZCY5t}ON)sVlP31Ph_L!9aAc8;=lC(rSSjOx%Mz%V|||-5hgCV z1>isOcC#Id!!RxYvnn7=2Pn9>x2}d4G1=SJ?D8Pc6(brvhYe=)?pFSVEP$tQ62>z^ z)+IKCS z{c1mK0XxHhK6sDINv}F9V*A&ZahEvNi6P@=_s5*I$1x3fgOllAv!7tbn*Z0FdW*58 z(aA#@;BKa}_nc-1eNi9B{#7kmI1`ISoeTDFmuusdRNobTA?6(SAXgDJT zKG?(fPsLboo7j}JW=u`T@2RZ>cTN>J)G17N`5Ghx`5Id`Dme8PoGotx^0@>`v*_D!DCk*_VR?zY7wREOvsz8nU7)qU`ZKL_|c+ z-c4?+<|x!*|H7p__@=Gi2}qi)`5;wpd?^p(%@z|XZM%ui)3PLHC!wjZ7Yz#)_G0^eXT^tQv3To6KtPkbGE+H$PiS>G0u5<;ZG`7~ZUKB1TaFDdk(#Gi`w-uv!1-3Ak|Q*90(9mU{uc zbJyDSBOZK7*gKYc-#QH{WYn@ZR|8`H0jOF5I7<1c>qz*YkV(tlP~S`BG4;H{`$UKT zeZdGkDwxpgl<>dn6-G){=7C36q$JBnY05bxX-XYkY06y{Rx|WQWL>p*u=L|+JaXc0 zBBtS2lWK~!q3GN}{0!r^ErCVA+6oaH0#_2{F#^<;h#;v#* z#&kI#1AHn~bMf{%`HF|?P z6?y{)&UbykU&zer>EFQ^n4%cQ4>=je*?#+UtAefv@xJ1H|0n(OJMlY5JWcg(>EDMHwcSgXex|X7 zL9g>5p`6UL+<((8SWv<4ZxEm+U&7&8w)Ie47reB&vd(|o^vWiwwK#L!jKlUYR}B}N z(d8WaSF`Ihms6QsAWTd;nrl~=@$7NreQet-eN2oRD0=k=IkhvB!KSq&GQ+UY@WqNK zSpcq{SlRZm203b zs=ewuV5-H%AbsKcz>d1H5c%(0XPCz}$A_&wReJH}YkTVZGqs;RZIY;zAv}_bYVGwm z(E^S>p;NYGlO*m9(wC?%PD{UGiEccE3#P5XB+3oApTDJrNcENdAj^#QUHVSCj!0tt zSD<20e=M;+YAZ*P#gDy^f8mxOX|CB5uDpCmg{Cio1a21MG8gm zePxP0+2vjrv&`)g^)s@Lv+v`Wo7)n@)YYLS&T&gk_K!a{*u-PAi5HjzgFG?t=kc*jrY9t(n%u&s z-sEF5X#smRCx9*c0~Gtq6+}_e>6M}ixlZb-aB=m=c(FLmJ>%$e1jSBrpUfuhh@6VChd|HK_8=vAT=!G8sWOgULhQCp1^wbo+X<+z3 z`#Ud?q5_ipuVVRHkZpOnO@Cr)jb+)OL$1?LUFWQ$P2TL*uUH7vltIhjv2cCCvO~?w zAog(9uIn(H&-80~VpW?=ldds@Jio1(_mPSU$!{(PCUwUYltclz(YJognYhC>Hx%+c zyAt~@cTqHx*EG;U$}*1Yyt!XmH-4j1SEq@x$gYoUrdOhg)w}Kux9T zk_X5n`kQEQxN5HR)7xz3UD0|^mHA7A5kBVy#gAP7KZ3BcB# zKn`HgegFQWdNKlz9|Cf88EpVEfkcD>l9Cx>b{Q>#w0-s~x!)mg5y4j{bj%QIyOC@H zQPvAn0|@o1iZyUKG6U-D5iJCngnvVr1P!!8w}SD*EgrN6w!VFwyelU+JkeA`ykV>i zIMp7oUqL<*f#;=g^``3xL5sE@^4^GE1GnrL&Odh``;XA$@#J6I#XQ1Z@E$};zA30y zlIG%7SPjuKvu(2T7Xv{NKmxo+4i|?n!q;|4fVzp2Z9P}UH(++M3=?4b_bP0oBdS9; z$@SAortE-FYnnreXri?jx8NE)cyNM*K=2SC1PJad4uKFLxGwHaAh^2|+#Q0u zySuyW&fWLct@?g^RePpSpO%`L+U@N=@|+(ST4Y0fr!qmjk%}>IB1<0ALP{QsLQ337 zb>8*z&z2RNEAapExAGpUIxJqKo$CgS->Jq2);dHN2LlDsed~F>zFB*{!TCoop?xnSa`1gGOT}7-X*=w$`0qGc z;l%3pH3IClH1Ies%yecajC)l^QIg9pu*{!+hmD(QYSaM@1< z_E|N{MBD8hb~~fu2zLIwr64zD(Mj8|J;5^8{id9@jiHG}h(Vx6Fd@%CRx1OVQ!hv~xOw?DY+wrV^^?=xlsfbko!ML8d?9gr0~!a!M)l zm`{^?U=O92K8nLl%ZCU7&oPr)BVvJiMn%8LphL>f6a|-LHe`K9wQrIKEPK>m=VDjB zg$v%A)V`7HvXrWgm<(b;$;IX{ix8;&#=W%h#-uhIvkeU`cM^5Wl+326+o-lH9qt_| zNN(e*f^8FOVq7bcbH9;AX7le`t))MS4a*Tz9)u?EUziYSt3PgaNIY%)hsqLY`Le$D0Fl}Nr0*U{=P$3q%!aDrFh0!r2q~Py zJ1K2zpKsgN`tmM;cl$uqr>?GM!O;Tjni~I9VUaJ^Q>C}`*ad%45~)_&nBpnQyE-F1 zHzG1)-j0PUx;T|vJmiTth#bQ{as$$9HXRl<+T$iXKv4Y%M10Y`t& z8UK&RVk#mCocG1&fu7ZelPU7idZZ(V7qB2l<{tytF9GVeav$>VgsDdX=y@e@fjjpE zS-=7^Et`HuTmt-|d@x9!Le~QjTc^YaTR(A1m%O*0YwT$2g~ADQhv=>TxyV<^w zEulNg2QZksfRJY|AyxrKh1dX6sO@rw&aUS4i2jL2rw#sqg7u*iYd8A%2+DXY};XhqU?z z29w3+9)XvLIZ`g5$^!3~v>7SI?JR4m|dChi-OUfddr? 
zH$;+~>&K^*>bb=Dl9-=4eB8-!R$@LK(h z4)7n%Uw*o=Frb)|(xLuVf+=!U?x%a3hUx5be0<`_we5Rw%@A=0CFiKk-#_+itFlkn z;@~V3W4mAAEK_4UUpUJgir4$ST$P_a?NmN;VXUE3f?(usFya6lsYInO znHb~tLMo5Y!zfwi#yDow3*<$PQZ}2CWewH(RI1}T$E#Dov0S=oYe~Nb3>_O^6er2~ z^+PfZO<*4ZuyPLG*$BdM4fEv6;Wkjnq+52fzGi)imM)R=?Yb9bctN3V&L@)H=6Ql_ zbIdtg7tqQ6QX|iYJn8{*sQD)NL;^82pi6LluTa=w;VIbkUV@G1r(TN}KmFI{Dvjq` zL{EzkfcjM2k9^&x(g9TBP~Ibp@M&_{%#C%U___vJyc{%ovbe`3bS%m6Ek@VcUlidb zk++wg^dkCBUl+c%A+6If22lak+`tuWQ6@^~nNbYcLo$k1d^VB?WVe2PBX|&qE;Vd% zX)P1W8Vl}^P+R=z{6DXrF6|qE&hz#1V zFH5f345Y^s9=><1v<_x*w#Ijd1lGRyMg<+@5-x9Dh85(XJpJfyOZRNQ-oXj7#a!Ki zCkWY__AVc+M@w2~7iTr;_%fyjYntC&UZ~$_X~04l+quA52cU!5T*u@x9^cZ=J34KM ztMqYGVL2Tg#r~wq=Z3RC9d>>N;m1d?BMKwHO)Z@%Q|9oI9J0|0$iW2LbGs1v@(wzG zvO|Lw#@~B7PQk(J1l*$R3LWNQ{Zb7g0wdb9RL=QZo&BlK@&6iGaW|m&{Q;r|h^Yqp E4~rF|;{X5v literal 0 HcmV?d00001 diff --git a/thesisenv/lib/python3.6/site-packages/dateutil/zoneinfo/rebuild.py b/thesisenv/lib/python3.6/site-packages/dateutil/zoneinfo/rebuild.py new file mode 100644 index 0000000..78f0d1a --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/dateutil/zoneinfo/rebuild.py @@ -0,0 +1,53 @@ +import logging +import os +import tempfile +import shutil +import json +from subprocess import check_call +from tarfile import TarFile + +from dateutil.zoneinfo import METADATA_FN, ZONEFILENAME + + +def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None): + """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar* + + filename is the timezone tarball from ``ftp.iana.org/tz``. + + """ + tmpdir = tempfile.mkdtemp() + zonedir = os.path.join(tmpdir, "zoneinfo") + moduledir = os.path.dirname(__file__) + try: + with TarFile.open(filename) as tf: + for name in zonegroups: + tf.extract(name, tmpdir) + filepaths = [os.path.join(tmpdir, n) for n in zonegroups] + try: + check_call(["zic", "-d", zonedir] + filepaths) + except OSError as e: + _print_on_nosuchfile(e) + raise + # write metadata file + with open(os.path.join(zonedir, METADATA_FN), 'w') as f: + json.dump(metadata, f, indent=4, sort_keys=True) + target = os.path.join(moduledir, ZONEFILENAME) + with TarFile.open(target, "w:%s" % format) as tf: + for entry in os.listdir(zonedir): + entrypath = os.path.join(zonedir, entry) + tf.add(entrypath, entry) + finally: + shutil.rmtree(tmpdir) + + +def _print_on_nosuchfile(e): + """Print helpful troubleshooting message + + e is an exception raised by subprocess.check_call() + + """ + if e.errno == 2: + logging.error( + "Could not find zic. Perhaps you need to install " + "libc-bin or some other package that provides it, " + "or it's not in your PATH?") diff --git a/thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/PKG-INFO b/thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/PKG-INFO new file mode 100644 index 0000000..83616b0 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/PKG-INFO @@ -0,0 +1,200 @@ +Metadata-Version: 1.1 +Name: django-celery +Version: 3.2.2 +Summary: Old django celery integration project. +Home-page: http://celeryproject.org +Author: Ask Solem +Author-email: ask@celeryproject.org +License: BSD +Description: =============================================== + django-celery - Celery Integration for Django + =============================================== + + .. 
image:: http://cloud.github.com/downloads/celery/celery/celery_128.png + + :Version: 3.2.0 + :Web: http://celeryproject.org/ + :Download: http://pypi.python.org/pypi/django-celery/ + :Source: http://github.com/celery/django-celery/ + :Keywords: celery, task queue, job queue, asynchronous, rabbitmq, amqp, redis, + python, django, webhooks, queue, distributed + + -- + + .. warning:: + + **THIS PROJECT IS ONLY REQUIRED IF YOU WANT TO USE DJANGO RESULT BACKEND + AND ADMIN INTEGRATION** + + Please follow the new tutorial at: + + http://docs.celeryproject.org/en/latest/django/first-steps-with-django.html + + django-celery provides Celery integration for Django; Using the Django ORM + and cache backend for storing results, autodiscovery of task modules + for applications listed in ``INSTALLED_APPS``, and more. + + Using django-celery + =================== + + To enable ``django-celery`` for your project you need to add ``djcelery`` to + ``INSTALLED_APPS``:: + + INSTALLED_APPS += ("djcelery", ) + + then add the following lines to your ``settings.py``:: + + import djcelery + djcelery.setup_loader() + + Everything works the same as described in the `Celery User Manual`_, except you + need to invoke the programs through ``manage.py``: + + ===================================== ===================================== + **Program** **Replace with** + ===================================== ===================================== + ``celery`` ``python manage.py celery`` + ``celery worker`` ``python manage.py celery worker`` + ``celery beat`` ``python manage.py celery beat`` + ``celery ...`` ``python manage.py celery ...`` + ===================================== ===================================== + + The other main difference is that configuration values are stored in + your Django projects' ``settings.py`` module rather than in + ``celeryconfig.py``. + + If you're trying celery for the first time you should start by reading + `Getting started with django-celery`_ + + Special note for mod_wsgi users + ------------------------------- + + If you're using ``mod_wsgi`` to deploy your Django application you need to + include the following in your ``.wsgi`` module:: + + import djcelery + djcelery.setup_loader() + + Documentation + ============= + + The `Celery User Manual`_ contains user guides, tutorials and an API + reference. It also has a dedicated `subsection about the Django integration`_. + + .. _`Celery User Manual`: http://docs.celeryproject.org/ + .. _`subsection about the Django integration`: + http://docs.celeryproject.org/en/latest/django/ + .. _`Getting started with django-celery`: + http://docs.celeryproject.org/en/latest/django/first-steps-with-django.html + + Installation + ============= + + You can install ``django-celery`` either via the Python Package Index (PyPI) + or from source. + + To install using ``pip``,:: + + $ pip install django-celery + + To install using ``easy_install``,:: + + $ easy_install django-celery + + You will then want to create the necessary tables. 
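 Those tables back the Django ORM result backend mentioned above. A minimal sketch of the
 ``settings.py`` lines that point Celery at it (the broker URL below is only an
 illustrative value)::

 import djcelery
 djcelery.setup_loader()

 # example broker; any transport URL supported by Celery 3.1 works here
 BROKER_URL = 'amqp://guest:guest@localhost:5672//'

 # store task state and results in the tables created by the migration below
 CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend'

 With that backend configured, ``AsyncResult`` lookups read task state from the
 database rather than from the message broker.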
If you generating + schema migrations, you'll want to run:: + + $ python manage.py migrate djcelery + + + + Downloading and installing from source + -------------------------------------- + + Download the latest version of ``django-celery`` from + http://pypi.python.org/pypi/django-celery/ + + You can install it by doing the following,:: + + $ tar xvfz django-celery-0.0.0.tar.gz + $ cd django-celery-0.0.0 + # python setup.py install # as root + + Using the development version + ------------------------------ + + You can clone the git repository by doing the following:: + + $ git clone git://github.com/celery/django-celery.git + + Getting Help + ============ + + Mailing list + ------------ + + For discussions about the usage, development, and future of celery, + please join the `celery-users`_ mailing list. + + .. _`celery-users`: http://groups.google.com/group/celery-users/ + + IRC + --- + + Come chat with us on IRC. The **#celery** channel is located at the `Freenode`_ + network. + + .. _`Freenode`: http://freenode.net + + + Bug tracker + =========== + + If you have any suggestions, bug reports or annoyances please report them + to our issue tracker at http://github.com/celery/django-celery/issues/ + + Wiki + ==== + + http://wiki.github.com/celery/celery/ + + Contributing + ============ + + Development of ``django-celery`` happens at Github: + http://github.com/celery/django-celery + + You are highly encouraged to participate in the development. + If you don't like Github (for some reason) you're welcome + to send regular patches. + + License + ======= + + This software is licensed under the ``New BSD License``. See the ``LICENSE`` + file in the top distribution directory for the full license text. + + .. # vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround + + +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Framework :: Django +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: POSIX +Classifier: Topic :: Communications +Classifier: Topic :: System :: Distributed Computing +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Python :: Implementation :: Jython diff --git a/thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/SOURCES.txt b/thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/SOURCES.txt new file mode 100644 index 0000000..373c30d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/SOURCES.txt @@ -0,0 +1,137 @@ +AUTHORS +Changelog +FAQ +INSTALL +LICENSE +MANIFEST.in +README +README.rst +THANKS +setup.cfg +setup.py +django_celery.egg-info/PKG-INFO +django_celery.egg-info/SOURCES.txt +django_celery.egg-info/dependency_links.txt +django_celery.egg-info/not-zip-safe +django_celery.egg-info/requires.txt +django_celery.egg-info/top_level.txt 
+djcelery/__init__.py +djcelery/admin.py +djcelery/admin_utils.py +djcelery/app.py +djcelery/common.py +djcelery/compat.py +djcelery/db.py +djcelery/humanize.py +djcelery/loaders.py +djcelery/managers.py +djcelery/models.py +djcelery/mon.py +djcelery/picklefield.py +djcelery/schedulers.py +djcelery/snapshot.py +djcelery/urls.py +djcelery/utils.py +djcelery/views.py +djcelery/backends/__init__.py +djcelery/backends/cache.py +djcelery/backends/database.py +djcelery/contrib/__init__.py +djcelery/contrib/test_runner.py +djcelery/management/__init__.py +djcelery/management/base.py +djcelery/management/commands/__init__.py +djcelery/management/commands/celery.py +djcelery/management/commands/celerybeat.py +djcelery/management/commands/celerycam.py +djcelery/management/commands/celeryd.py +djcelery/management/commands/celeryd_detach.py +djcelery/management/commands/celeryd_multi.py +djcelery/management/commands/celerymon.py +djcelery/management/commands/djcelerymon.py +djcelery/migrations/0001_initial.py +djcelery/migrations/__init__.py +djcelery/monproj/__init__.py +djcelery/monproj/urls.py +djcelery/static/djcelery/style.css +djcelery/templates/admin/djcelery/change_list.html +djcelery/templates/djcelery/confirm_rate_limit.html +djcelery/tests/__init__.py +djcelery/tests/_compat.py +djcelery/tests/req.py +djcelery/tests/test_admin.py +djcelery/tests/test_commands.py +djcelery/tests/test_discovery.py +djcelery/tests/test_loaders.py +djcelery/tests/test_models.py +djcelery/tests/test_schedulers.py +djcelery/tests/test_snapshot.py +djcelery/tests/test_views.py +djcelery/tests/test_worker_job.py +djcelery/tests/utils.py +djcelery/tests/test_backends/__init__.py +djcelery/tests/test_backends/test_cache.py +djcelery/tests/test_backends/test_database.py +djcelery/transport/__init__.py +docs/Makefile +docs/__init__.py +docs/changelog.rst +docs/conf.py +docs/faq.rst +docs/index.rst +docs/introduction.rst +docs/settings.py +docs/.static/.keep +docs/.templates/page.html +docs/.templates/sidebarintro.html +docs/.templates/sidebarlogo.html +docs/_ext/applyxrefs.py +docs/_ext/literals_to_xrefs.py +docs/_theme/celery/theme.conf +docs/_theme/celery/static/celery.css_t +docs/cookbook/index.rst +docs/cookbook/unit-testing.rst +docs/getting-started/first-steps-with-django.rst +docs/getting-started/index.rst +docs/reference/djcelery.app.rst +docs/reference/djcelery.backends.cache.rst +docs/reference/djcelery.backends.database.rst +docs/reference/djcelery.common.rst +docs/reference/djcelery.compat.rst +docs/reference/djcelery.contrib.test_runner.rst +docs/reference/djcelery.db.rst +docs/reference/djcelery.humanize.rst +docs/reference/djcelery.loaders.rst +docs/reference/djcelery.managers.rst +docs/reference/djcelery.models.rst +docs/reference/djcelery.schedulers.rst +docs/reference/djcelery.snapshot.rst +docs/reference/djcelery.urls.rst +docs/reference/djcelery.utils.rst +docs/reference/djcelery.views.rst +docs/reference/index.rst +extra/release/doc4allmods +extra/release/removepyc.sh +extra/release/sphinx-to-rst.py +extra/release/verify-reference-index.sh +extra/requirements/default.txt +extra/requirements/docs.txt +extra/requirements/test.txt +locale/en/LC_MESSAGES/django.po +locale/es/LC_MESSAGES/django.mo +locale/es/LC_MESSAGES/django.po +requirements/default.txt +requirements/docs.txt +requirements/test.txt +tests/__init__.py +tests/manage.py +tests/settings.py +tests/urls.py +tests/someapp/__init__.py +tests/someapp/models.py +tests/someapp/tasks.py +tests/someapp/tests.py +tests/someapp/views.py 
+tests/someappwotask/__init__.py +tests/someappwotask/models.py +tests/someappwotask/views.py \ No newline at end of file diff --git a/thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/dependency_links.txt b/thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/installed-files.txt b/thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/installed-files.txt new file mode 100644 index 0000000..62a9aba --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/installed-files.txt @@ -0,0 +1,119 @@ +../djcelery/__init__.py +../djcelery/__pycache__/__init__.cpython-36.pyc +../djcelery/__pycache__/admin.cpython-36.pyc +../djcelery/__pycache__/admin_utils.cpython-36.pyc +../djcelery/__pycache__/app.cpython-36.pyc +../djcelery/__pycache__/common.cpython-36.pyc +../djcelery/__pycache__/compat.cpython-36.pyc +../djcelery/__pycache__/db.cpython-36.pyc +../djcelery/__pycache__/humanize.cpython-36.pyc +../djcelery/__pycache__/loaders.cpython-36.pyc +../djcelery/__pycache__/managers.cpython-36.pyc +../djcelery/__pycache__/models.cpython-36.pyc +../djcelery/__pycache__/mon.cpython-36.pyc +../djcelery/__pycache__/picklefield.cpython-36.pyc +../djcelery/__pycache__/schedulers.cpython-36.pyc +../djcelery/__pycache__/snapshot.cpython-36.pyc +../djcelery/__pycache__/urls.cpython-36.pyc +../djcelery/__pycache__/utils.cpython-36.pyc +../djcelery/__pycache__/views.cpython-36.pyc +../djcelery/admin.py +../djcelery/admin_utils.py +../djcelery/app.py +../djcelery/backends/__init__.py +../djcelery/backends/__pycache__/__init__.cpython-36.pyc +../djcelery/backends/__pycache__/cache.cpython-36.pyc +../djcelery/backends/__pycache__/database.cpython-36.pyc +../djcelery/backends/cache.py +../djcelery/backends/database.py +../djcelery/common.py +../djcelery/compat.py +../djcelery/contrib/__init__.py +../djcelery/contrib/__pycache__/__init__.cpython-36.pyc +../djcelery/contrib/__pycache__/test_runner.cpython-36.pyc +../djcelery/contrib/test_runner.py +../djcelery/db.py +../djcelery/humanize.py +../djcelery/loaders.py +../djcelery/management/__init__.py +../djcelery/management/__pycache__/__init__.cpython-36.pyc +../djcelery/management/__pycache__/base.cpython-36.pyc +../djcelery/management/base.py +../djcelery/management/commands/__init__.py +../djcelery/management/commands/__pycache__/__init__.cpython-36.pyc +../djcelery/management/commands/__pycache__/celery.cpython-36.pyc +../djcelery/management/commands/__pycache__/celerybeat.cpython-36.pyc +../djcelery/management/commands/__pycache__/celerycam.cpython-36.pyc +../djcelery/management/commands/__pycache__/celeryd.cpython-36.pyc +../djcelery/management/commands/__pycache__/celeryd_detach.cpython-36.pyc +../djcelery/management/commands/__pycache__/celeryd_multi.cpython-36.pyc +../djcelery/management/commands/__pycache__/celerymon.cpython-36.pyc +../djcelery/management/commands/__pycache__/djcelerymon.cpython-36.pyc +../djcelery/management/commands/celery.py +../djcelery/management/commands/celerybeat.py +../djcelery/management/commands/celerycam.py +../djcelery/management/commands/celeryd.py +../djcelery/management/commands/celeryd_detach.py +../djcelery/management/commands/celeryd_multi.py 
+../djcelery/management/commands/celerymon.py +../djcelery/management/commands/djcelerymon.py +../djcelery/managers.py +../djcelery/migrations/0001_initial.py +../djcelery/migrations/__init__.py +../djcelery/migrations/__pycache__/0001_initial.cpython-36.pyc +../djcelery/migrations/__pycache__/__init__.cpython-36.pyc +../djcelery/models.py +../djcelery/mon.py +../djcelery/monproj/__init__.py +../djcelery/monproj/__pycache__/__init__.cpython-36.pyc +../djcelery/monproj/__pycache__/urls.cpython-36.pyc +../djcelery/monproj/urls.py +../djcelery/picklefield.py +../djcelery/schedulers.py +../djcelery/snapshot.py +../djcelery/static/djcelery/style.css +../djcelery/templates/admin/djcelery/change_list.html +../djcelery/templates/djcelery/confirm_rate_limit.html +../djcelery/tests/__init__.py +../djcelery/tests/__pycache__/__init__.cpython-36.pyc +../djcelery/tests/__pycache__/_compat.cpython-36.pyc +../djcelery/tests/__pycache__/req.cpython-36.pyc +../djcelery/tests/__pycache__/test_admin.cpython-36.pyc +../djcelery/tests/__pycache__/test_commands.cpython-36.pyc +../djcelery/tests/__pycache__/test_discovery.cpython-36.pyc +../djcelery/tests/__pycache__/test_loaders.cpython-36.pyc +../djcelery/tests/__pycache__/test_models.cpython-36.pyc +../djcelery/tests/__pycache__/test_schedulers.cpython-36.pyc +../djcelery/tests/__pycache__/test_snapshot.cpython-36.pyc +../djcelery/tests/__pycache__/test_views.cpython-36.pyc +../djcelery/tests/__pycache__/test_worker_job.cpython-36.pyc +../djcelery/tests/__pycache__/utils.cpython-36.pyc +../djcelery/tests/_compat.py +../djcelery/tests/req.py +../djcelery/tests/test_admin.py +../djcelery/tests/test_backends/__init__.py +../djcelery/tests/test_backends/__pycache__/__init__.cpython-36.pyc +../djcelery/tests/test_backends/__pycache__/test_cache.cpython-36.pyc +../djcelery/tests/test_backends/__pycache__/test_database.cpython-36.pyc +../djcelery/tests/test_backends/test_cache.py +../djcelery/tests/test_backends/test_database.py +../djcelery/tests/test_commands.py +../djcelery/tests/test_discovery.py +../djcelery/tests/test_loaders.py +../djcelery/tests/test_models.py +../djcelery/tests/test_schedulers.py +../djcelery/tests/test_snapshot.py +../djcelery/tests/test_views.py +../djcelery/tests/test_worker_job.py +../djcelery/tests/utils.py +../djcelery/transport/__init__.py +../djcelery/transport/__pycache__/__init__.cpython-36.pyc +../djcelery/urls.py +../djcelery/utils.py +../djcelery/views.py +PKG-INFO +SOURCES.txt +dependency_links.txt +not-zip-safe +requires.txt +top_level.txt diff --git a/thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/not-zip-safe b/thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/not-zip-safe new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/not-zip-safe @@ -0,0 +1 @@ + diff --git a/thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/requires.txt b/thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/requires.txt new file mode 100644 index 0000000..20210b3 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/requires.txt @@ -0,0 +1,2 @@ +celery<4.0,>=3.1.15 +django>=1.8 diff --git a/thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/top_level.txt b/thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/top_level.txt new file mode 100644 index 0000000..786586c --- /dev/null +++ 
b/thesisenv/lib/python3.6/site-packages/django_celery-3.2.2-py3.6.egg-info/top_level.txt @@ -0,0 +1 @@ +djcelery diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/__init__.py b/thesisenv/lib/python3.6/site-packages/djcelery/__init__.py new file mode 100644 index 0000000..64039d3 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/__init__.py @@ -0,0 +1,34 @@ +"""Old django celery integration project.""" +# :copyright: (c) 2009 - 2015 by Ask Solem. +# :license: BSD, see LICENSE for more details. +from __future__ import absolute_import, unicode_literals + +import os +import sys + +VERSION = (3, 2, 2) +__version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:]) +__author__ = 'Ask Solem' +__contact__ = 'ask@celeryproject.org' +__homepage__ = 'http://celeryproject.org' +__docformat__ = 'restructuredtext' +__license__ = 'BSD (3 clause)' + +# -eof meta- + + +if sys.version_info[0] == 3: + + def setup_loader(): + os.environ.setdefault( + 'CELERY_LOADER', 'djcelery.loaders.DjangoLoader', + ) + +else: + + def setup_loader(): # noqa + os.environ.setdefault( + b'CELERY_LOADER', b'djcelery.loaders.DjangoLoader', + ) + +from celery import current_app as celery # noqa diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/admin.py b/thesisenv/lib/python3.6/site-packages/djcelery/admin.py new file mode 100644 index 0000000..874bd70 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/admin.py @@ -0,0 +1,385 @@ +from __future__ import absolute_import, unicode_literals + +from anyjson import loads + +from django import forms +from django.conf import settings +from django.contrib import admin +from django.contrib.admin import helpers +from django.contrib.admin.views import main as main_views +from django.forms.widgets import Select +from django.shortcuts import render_to_response +from django.template import RequestContext +from django.utils.html import escape +from django.utils.translation import ugettext_lazy as _ + +from celery import current_app +from celery import states +from celery.task.control import broadcast, revoke, rate_limit +from celery.utils import cached_property +from celery.utils.text import abbrtask + +from .admin_utils import action, display_field, fixedwidth +from .models import ( + TaskState, WorkerState, + PeriodicTask, IntervalSchedule, CrontabSchedule, + PeriodicTasks +) +from .humanize import naturaldate +from .utils import is_database_scheduler, make_aware + +try: + from django.utils.encoding import force_text +except ImportError: + from django.utils.encoding import force_unicode as force_text # noqa + + +TASK_STATE_COLORS = {states.SUCCESS: 'green', + states.FAILURE: 'red', + states.REVOKED: 'magenta', + states.STARTED: 'yellow', + states.RETRY: 'orange', + 'RECEIVED': 'blue'} +NODE_STATE_COLORS = {'ONLINE': 'green', + 'OFFLINE': 'gray'} + + +class MonitorList(main_views.ChangeList): + + def __init__(self, *args, **kwargs): + super(MonitorList, self).__init__(*args, **kwargs) + self.title = self.model_admin.list_page_title + + +@display_field(_('state'), 'state') +def colored_state(task): + state = escape(task.state) + color = TASK_STATE_COLORS.get(task.state, 'black') + return '{1}'.format(color, state) + + +@display_field(_('state'), 'last_heartbeat') +def node_state(node): + state = node.is_alive() and 'ONLINE' or 'OFFLINE' + color = NODE_STATE_COLORS[state] + return '{1}'.format(color, state) + + +@display_field(_('ETA'), 'eta') +def eta(task): + if not task.eta: + return 'none' + return escape(make_aware(task.eta)) + + 
+@display_field(_('when'), 'tstamp') +def tstamp(task): + # convert to local timezone + value = make_aware(task.tstamp) + return '<div title="{0}">{1}</div>'.format( + escape(str(value)), escape(naturaldate(value)), + ) + + +@display_field(_('name'), 'name') +def name(task): + short_name = abbrtask(task.name, 16) + return '<div title="{0}"><b>{1}</b></div>
'.format( + escape(task.name), escape(short_name), + ) + + +class ModelMonitor(admin.ModelAdmin): + can_add = False + can_delete = False + + def get_changelist(self, request, **kwargs): + return MonitorList + + def change_view(self, request, object_id, extra_context=None): + extra_context = extra_context or {} + extra_context.setdefault('title', self.detail_title) + return super(ModelMonitor, self).change_view( + request, object_id, extra_context=extra_context, + ) + + def has_delete_permission(self, request, obj=None): + if not self.can_delete: + return False + return super(ModelMonitor, self).has_delete_permission(request, obj) + + def has_add_permission(self, request): + if not self.can_add: + return False + return super(ModelMonitor, self).has_add_permission(request) + + +class TaskMonitor(ModelMonitor): + detail_title = _('Task detail') + list_page_title = _('Tasks') + rate_limit_confirmation_template = 'djcelery/confirm_rate_limit.html' + date_hierarchy = 'tstamp' + fieldsets = ( + (None, { + 'fields': ('state', 'task_id', 'name', 'args', 'kwargs', + 'eta', 'runtime', 'worker', 'tstamp'), + 'classes': ('extrapretty', ), + }), + ('Details', { + 'classes': ('collapse', 'extrapretty'), + 'fields': ('result', 'traceback', 'expires'), + }), + ) + list_display = ( + fixedwidth('task_id', name=_('UUID'), pt=8), + colored_state, + name, + fixedwidth('args', pretty=True), + fixedwidth('kwargs', pretty=True), + eta, + tstamp, + 'worker', + ) + readonly_fields = ( + 'state', 'task_id', 'name', 'args', 'kwargs', + 'eta', 'runtime', 'worker', 'result', 'traceback', + 'expires', 'tstamp', + ) + list_filter = ('state', 'name', 'tstamp', 'eta', 'worker') + search_fields = ('name', 'task_id', 'args', 'kwargs', 'worker__hostname') + actions = ['revoke_tasks', + 'terminate_tasks', + 'kill_tasks', + 'rate_limit_tasks'] + + class Media: + css = {'all': ('djcelery/style.css', )} + + @action(_('Revoke selected tasks')) + def revoke_tasks(self, request, queryset): + with current_app.default_connection() as connection: + for state in queryset: + revoke(state.task_id, connection=connection) + + @action(_('Terminate selected tasks')) + def terminate_tasks(self, request, queryset): + with current_app.default_connection() as connection: + for state in queryset: + revoke(state.task_id, connection=connection, terminate=True) + + @action(_('Kill selected tasks')) + def kill_tasks(self, request, queryset): + with current_app.default_connection() as connection: + for state in queryset: + revoke(state.task_id, connection=connection, + terminate=True, signal='KILL') + + @action(_('Rate limit selected tasks')) + def rate_limit_tasks(self, request, queryset): + tasks = set([task.name for task in queryset]) + opts = self.model._meta + app_label = opts.app_label + if request.POST.get('post'): + rate = request.POST['rate_limit'] + with current_app.default_connection() as connection: + for task_name in tasks: + rate_limit(task_name, rate, connection=connection) + return None + + context = { + 'title': _('Rate limit selection'), + 'queryset': queryset, + 'object_name': force_text(opts.verbose_name), + 'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME, + 'opts': opts, + 'app_label': app_label, + } + + return render_to_response( + self.rate_limit_confirmation_template, context, + context_instance=RequestContext(request), + ) + + def get_actions(self, request): + actions = super(TaskMonitor, self).get_actions(request) + actions.pop('delete_selected', None) + return actions + + def get_queryset(self, request): + qs = 
super(TaskMonitor, self).get_queryset(request) + return qs.select_related('worker') + + +class WorkerMonitor(ModelMonitor): + can_add = True + detail_title = _('Node detail') + list_page_title = _('Worker Nodes') + list_display = ('hostname', node_state) + readonly_fields = ('last_heartbeat', ) + actions = ['shutdown_nodes', + 'enable_events', + 'disable_events'] + + @action(_('Shutdown selected worker nodes')) + def shutdown_nodes(self, request, queryset): + broadcast('shutdown', destination=[n.hostname for n in queryset]) + + @action(_('Enable event mode for selected nodes.')) + def enable_events(self, request, queryset): + broadcast('enable_events', + destination=[n.hostname for n in queryset]) + + @action(_('Disable event mode for selected nodes.')) + def disable_events(self, request, queryset): + broadcast('disable_events', + destination=[n.hostname for n in queryset]) + + def get_actions(self, request): + actions = super(WorkerMonitor, self).get_actions(request) + actions.pop('delete_selected', None) + return actions + + +admin.site.register(TaskState, TaskMonitor) +admin.site.register(WorkerState, WorkerMonitor) + + +# ### Periodic Tasks + + +class TaskSelectWidget(Select): + celery_app = current_app + _choices = None + + def tasks_as_choices(self): + _ = self._modules # noqa + tasks = list(sorted(name for name in self.celery_app.tasks + if not name.startswith('celery.'))) + return (('', ''), ) + tuple(zip(tasks, tasks)) + + @property + def choices(self): + if self._choices is None: + self._choices = self.tasks_as_choices() + return self._choices + + @choices.setter + def choices(self, _): + # ChoiceField.__init__ sets ``self.choices = choices`` + # which would override ours. + pass + + @cached_property + def _modules(self): + self.celery_app.loader.import_default_modules() + + +class TaskChoiceField(forms.ChoiceField): + widget = TaskSelectWidget + + def valid_value(self, value): + return True + + +class PeriodicTaskForm(forms.ModelForm): + regtask = TaskChoiceField(label=_('Task (registered)'), + required=False) + task = forms.CharField(label=_('Task (custom)'), required=False, + max_length=200) + + class Meta: + model = PeriodicTask + exclude = () + + def clean(self): + data = super(PeriodicTaskForm, self).clean() + regtask = data.get('regtask') + if regtask: + data['task'] = regtask + if not data['task']: + exc = forms.ValidationError(_('Need name of task')) + self._errors['task'] = self.error_class(exc.messages) + raise exc + return data + + def _clean_json(self, field): + value = self.cleaned_data[field] + try: + loads(value) + except ValueError as exc: + raise forms.ValidationError( + _('Unable to parse JSON: %s') % exc, + ) + return value + + def clean_args(self): + return self._clean_json('args') + + def clean_kwargs(self): + return self._clean_json('kwargs') + + +class PeriodicTaskAdmin(admin.ModelAdmin): + form = PeriodicTaskForm + model = PeriodicTask + list_display = ( + 'enabled', + '__unicode__', + 'task', + 'args', + 'kwargs', + ) + search_fields = ('name', 'task') + list_display_links = ('enabled', '__unicode__', 'task') + ordering = ('-enabled', 'name') + fieldsets = ( + (None, { + 'fields': ('name', 'regtask', 'task', 'enabled'), + 'classes': ('extrapretty', 'wide'), + }), + ('Schedule', { + 'fields': ('interval', 'crontab'), + 'classes': ('extrapretty', 'wide', ), + }), + ('Arguments', { + 'fields': ('args', 'kwargs'), + 'classes': ('extrapretty', 'wide', 'collapse'), + }), + ('Execution Options', { + 'fields': ('expires', 'queue', 'exchange', 'routing_key'), + 
'classes': ('extrapretty', 'wide', 'collapse'), + }), + ) + actions = ['enable_tasks', + 'disable_tasks'] + + def update_periodic_tasks(self): + dummy_periodic_task = PeriodicTask() + dummy_periodic_task.no_changes = False + PeriodicTasks.changed(dummy_periodic_task) + + @action(_('Enable selected periodic tasks')) + def enable_tasks(self, request, queryset): + queryset.update(enabled=True) + self.update_periodic_tasks() + + @action(_('Disable selected periodic tasks')) + def disable_tasks(self, request, queryset): + queryset.update(enabled=False) + self.update_periodic_tasks() + + def changelist_view(self, request, extra_context=None): + extra_context = extra_context or {} + scheduler = getattr(settings, 'CELERYBEAT_SCHEDULER', None) + extra_context['wrong_scheduler'] = not is_database_scheduler(scheduler) + return super(PeriodicTaskAdmin, self).changelist_view(request, + extra_context) + + def get_queryset(self, request): + qs = super(PeriodicTaskAdmin, self).get_queryset(request) + return qs.select_related('interval', 'crontab') + + +admin.site.register(IntervalSchedule) +admin.site.register(CrontabSchedule) +admin.site.register(PeriodicTask, PeriodicTaskAdmin) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/admin_utils.py b/thesisenv/lib/python3.6/site-packages/djcelery/admin_utils.py new file mode 100644 index 0000000..da5b39e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/admin_utils.py @@ -0,0 +1,50 @@ +from __future__ import absolute_import, unicode_literals + +from pprint import pformat + +from django.utils.html import escape + +FIXEDWIDTH_STYLE = '''\ +<span title="{0}" style="font-size: {1}pt; \ +font-family: Menlo, Courier; ">{2}</span> \ +''' + + +def attrs(**kwargs): + def _inner(fun): + for attr_name, attr_value in kwargs.items(): + setattr(fun, attr_name, attr_value) + return fun + return _inner + + +def display_field(short_description, admin_order_field, + allow_tags=True, **kwargs): + return attrs(short_description=short_description, + admin_order_field=admin_order_field, + allow_tags=allow_tags, **kwargs) + + +def action(short_description, **kwargs): + return attrs(short_description=short_description, **kwargs) + + +def fixedwidth(field, name=None, pt=6, width=16, maxlen=64, pretty=False): + + @display_field(name or field, field) + def f(task): + val = getattr(task, field) + if pretty: + val = pformat(val, width=width) + if val.startswith("u'") or val.startswith('u"'): + val = val[2:-1] + shortval = val.replace(',', ',\n') + shortval = shortval.replace('\n', '|br/|') + + if len(shortval) > maxlen: + shortval = shortval[:maxlen] + '...' + styled = FIXEDWIDTH_STYLE.format( + escape(val[:255]), pt, escape(shortval), + ) + return styled.replace('|br/|', '<br/>
') + return f diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/app.py b/thesisenv/lib/python3.6/site-packages/djcelery/app.py new file mode 100644 index 0000000..7b75759 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/app.py @@ -0,0 +1,7 @@ +from __future__ import absolute_import, unicode_literals + +from celery import current_app + + +#: The Django-Celery app instance. +app = current_app._get_current_object() diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/backends/__init__.py b/thesisenv/lib/python3.6/site-packages/djcelery/backends/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/backends/cache.py b/thesisenv/lib/python3.6/site-packages/djcelery/backends/cache.py new file mode 100644 index 0000000..203326b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/backends/cache.py @@ -0,0 +1,34 @@ +"""celery.backends.cache""" +from __future__ import absolute_import, unicode_literals + +from datetime import timedelta + +from django.core.cache import cache, caches + +from celery import current_app +from celery.backends.base import KeyValueStoreBackend + +# CELERY_CACHE_BACKEND overrides the django-global(tm) backend settings. +if current_app.conf.CELERY_CACHE_BACKEND: + cache = caches[current_app.conf.CELERY_CACHE_BACKEND] # noqa + + +class CacheBackend(KeyValueStoreBackend): + """Backend using the Django cache framework to store task metadata.""" + + def __init__(self, *args, **kwargs): + super(CacheBackend, self).__init__(*args, **kwargs) + expires = kwargs.get('expires', + current_app.conf.CELERY_TASK_RESULT_EXPIRES) + if isinstance(expires, timedelta): + expires = int(max(expires.total_seconds(), 0)) + self.expires = expires + + def get(self, key): + return cache.get(key) + + def set(self, key, value): + cache.set(key, value, self.expires) + + def delete(self, key): + cache.delete(key) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/backends/database.py b/thesisenv/lib/python3.6/site-packages/djcelery/backends/database.py new file mode 100644 index 0000000..8d640a0 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/backends/database.py @@ -0,0 +1,65 @@ +from __future__ import absolute_import, unicode_literals + +from celery import current_app +from celery.backends.base import BaseDictBackend + +try: + from celery.utils.timeutils import maybe_timedelta +except ImportError: + from celery.utils.time import maybe_timedelta + +from ..models import TaskMeta, TaskSetMeta + + +class DatabaseBackend(BaseDictBackend): + """The database backend. + + Using Django models to store task state. 
+ + """ + TaskModel = TaskMeta + TaskSetModel = TaskSetMeta + + expires = current_app.conf.CELERY_TASK_RESULT_EXPIRES + create_django_tables = True + + subpolling_interval = 0.5 + + def _store_result(self, task_id, result, status, + traceback=None, request=None): + """Store return value and status of an executed task.""" + self.TaskModel._default_manager.store_result( + task_id, result, status, + traceback=traceback, children=self.current_task_children(request), + ) + return result + + def _save_group(self, group_id, result): + """Store the result of an executed group.""" + self.TaskSetModel._default_manager.store_result(group_id, result) + return result + + def _get_task_meta_for(self, task_id): + """Get task metadata for a task by id.""" + return self.TaskModel._default_manager.get_task(task_id).to_dict() + + def _restore_group(self, group_id): + """Get group metadata for a group by id.""" + meta = self.TaskSetModel._default_manager.restore_taskset(group_id) + if meta: + return meta.to_dict() + + def _delete_group(self, group_id): + self.TaskSetModel._default_manager.delete_taskset(group_id) + + def _forget(self, task_id): + try: + self.TaskModel._default_manager.get(task_id=task_id).delete() + except self.TaskModel.DoesNotExist: + pass + + def cleanup(self): + """Delete expired metadata.""" + expires = maybe_timedelta(self.expires) + for model in self.TaskModel, self.TaskSetModel: + model._default_manager.delete_expired(expires) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/common.py b/thesisenv/lib/python3.6/site-packages/djcelery/common.py new file mode 100644 index 0000000..a6535db --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/common.py @@ -0,0 +1,72 @@ +from __future__ import absolute_import, unicode_literals + +from contextlib import contextmanager +from functools import wraps + +from django.utils import translation + + +@contextmanager +def respect_language(language): + """Context manager that changes the current translation language for + all code inside the following block. + + Can e.g. be used inside tasks like this:: + + from celery import task + from djcelery.common import respect_language + + @task + def my_task(language=None): + with respect_language(language): + pass + """ + if language: + prev = translation.get_language() + translation.activate(language) + try: + yield + finally: + translation.activate(prev) + else: + yield + + +def respects_language(fun): + """Decorator for tasks with respect to site's current language. + You can use this decorator on your tasks together with default @task + decorator (remember that the task decorator must be applied last). + + See also the with-statement alternative :func:`respect_language`. + + **Example**: + + .. code-block:: python + + @task + @respects_language + def my_task() + # localize something. + + The task will then accept a ``language`` argument that will be + used to set the language in the task, and the task can thus be + called like: + + .. 
code-block:: python + + from django.utils import translation + from myapp.tasks import my_task + + # Pass the current language on to the task + my_task.delay(language=translation.get_language()) + + # or set the language explicitly + my_task.delay(language='no.no') + + """ + + @wraps(fun) + def _inner(*args, **kwargs): + with respect_language(kwargs.pop('language', None)): + return fun(*args, **kwargs) + return _inner diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/compat.py b/thesisenv/lib/python3.6/site-packages/djcelery/compat.py new file mode 100644 index 0000000..fc797a3 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/compat.py @@ -0,0 +1,44 @@ +from __future__ import absolute_import + +import os +import sys + +from kombu.utils.encoding import bytes_to_str, str_to_bytes + +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + + +def python_2_unicode_compatible(cls): + """Taken from Django project (django/utils/encoding.py) & modified a bit to + always have __unicode__ method available. + """ + if '__str__' not in cls.__dict__: + raise ValueError("@python_2_unicode_compatible cannot be applied " + "to %s because it doesn't define __str__()." % + cls.__name__) + + cls.__unicode__ = cls.__str__ + + if PY2: + cls.__str__ = lambda self: self.__unicode__().encode('utf-8') + + return cls + + +if PY3: + unicode = str + + def itervalues(x): + return x.values() + + def setenv(k, v): + os.environ[bytes_to_str(k)] = bytes_to_str(v) +else: + unicode = unicode + + def itervalues(x): # noqa + return x.itervalues() + + def setenv(k, v): # noqa + os.environ[str_to_bytes(k)] = str_to_bytes(v) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/contrib/__init__.py b/thesisenv/lib/python3.6/site-packages/djcelery/contrib/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/contrib/test_runner.py b/thesisenv/lib/python3.6/site-packages/djcelery/contrib/test_runner.py new file mode 100644 index 0000000..e65de29 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/contrib/test_runner.py @@ -0,0 +1,69 @@ +from __future__ import absolute_import, unicode_literals + +from django.conf import settings +try: + from django.test.runner import DiscoverRunner +except ImportError: + from django.test.simple import DjangoTestSuiteRunner as DiscoverRunner + +from celery import current_app +from celery.task import Task +from djcelery.backends.database import DatabaseBackend + + +USAGE = """\ +Custom test runner to allow testing of celery delayed tasks. +""" + + +def _set_eager(): + settings.CELERY_ALWAYS_EAGER = True + current_app.conf.CELERY_ALWAYS_EAGER = True + settings.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True # Issue #75 + current_app.conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True + + +class CeleryTestSuiteRunner(DiscoverRunner): + """Django test runner allowing testing of celery delayed tasks. + + All tasks are run locally, not in a worker. + + To use this runner set ``settings.TEST_RUNNER``:: + + TEST_RUNNER = 'djcelery.contrib.test_runner.CeleryTestSuiteRunner' + + """ + def setup_test_environment(self, **kwargs): + _set_eager() + super(CeleryTestSuiteRunner, self).setup_test_environment(**kwargs) + + +class CeleryTestSuiteRunnerStoringResult(DiscoverRunner): + """Django test runner allowing testing of celery delayed tasks, + and storing the results of those tasks in ``TaskMeta``. + + Requires setting CELERY_RESULT_BACKEND = 'database'. 
+ + USAGE: + + In ``settings.py``:: + + TEST_RUNNER = ''' + djcelery.contrib.test_runner.CeleryTestSuiteRunnerStoringResult + '''.strip() + + """ + + def setup_test_environment(self, **kwargs): + # Monkey-patch Task.on_success() method + def on_success_patched(self, retval, task_id, args, kwargs): + app = current_app._get_current_object() + DatabaseBackend(app=app).store_result(task_id, retval, 'SUCCESS') + Task.on_success = classmethod(on_success_patched) + + super(CeleryTestSuiteRunnerStoringResult, self).setup_test_environment( + **kwargs + ) + + settings.CELERY_RESULT_BACKEND = 'database' + _set_eager() diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/db.py b/thesisenv/lib/python3.6/site-packages/djcelery/db.py new file mode 100644 index 0000000..2204083 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/db.py @@ -0,0 +1,63 @@ +from __future__ import absolute_import + +import django + +from contextlib import contextmanager +from django.db import transaction + +if django.VERSION < (1, 6): # pragma: no cover + + def get_queryset(s): + return s.get_query_set() +else: + def get_queryset(s): # noqa + return s.get_queryset() + +try: + from django.db.transaction import atomic # noqa +except ImportError: # pragma: no cover + + try: + from django.db.transaction import Transaction # noqa + except ImportError: + @contextmanager + def commit_on_success(*args, **kwargs): + try: + transaction.enter_transaction_management(*args, **kwargs) + transaction.managed(True, *args, **kwargs) + try: + yield + except: + if transaction.is_dirty(*args, **kwargs): + transaction.rollback(*args, **kwargs) + raise + else: + if transaction.is_dirty(*args, **kwargs): + try: + transaction.commit(*args, **kwargs) + except: + transaction.rollback(*args, **kwargs) + raise + finally: + transaction.leave_transaction_management(*args, **kwargs) + else: # pragma: no cover + from django.db.transaction import commit_on_success # noqa + + commit_unless_managed = transaction.commit_unless_managed + rollback_unless_managed = transaction.rollback_unless_managed +else: + @contextmanager + def commit_on_success(using=None): # noqa + connection = transaction.get_connection(using) + if connection.features.autocommits_when_autocommit_is_off: + # ignore stupid warnings and errors + yield + else: + with transaction.atomic(using): + yield + + def commit_unless_managed(*args, **kwargs): # noqa + pass + + def rollback_unless_managed(*args, **kwargs): # noqa + pass diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/humanize.py b/thesisenv/lib/python3.6/site-packages/djcelery/humanize.py new file mode 100644 index 0000000..74517cc --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/humanize.py @@ -0,0 +1,85 @@ +from __future__ import absolute_import, unicode_literals + +from datetime import datetime + +from django.utils.translation import ungettext, ugettext as _ +from .utils import now + + +def pluralize_year(n): + return ungettext(_('{num} year ago'), _('{num} years ago'), n) + + +def pluralize_month(n): + return ungettext(_('{num} month ago'), _('{num} months ago'), n) + + +def pluralize_week(n): + return ungettext(_('{num} week ago'), _('{num} weeks ago'), n) + + +def pluralize_day(n): + return ungettext(_('{num} day ago'), _('{num} days ago'), n) + + +OLDER_CHUNKS = ( + (365.0, pluralize_year), + (30.0, pluralize_month), + (7.0, pluralize_week), + (1.0, pluralize_day), +) + + +def _un(singular__plural, n=None): + singular, plural = singular__plural + return ungettext(singular, plural, n) + 
+ +def naturaldate(date, include_seconds=False): + """Convert datetime into a human natural date string.""" + + if not date: + return '' + + right_now = now() + today = datetime(right_now.year, right_now.month, + right_now.day, tzinfo=right_now.tzinfo) + delta = right_now - date + delta_midnight = today - date + + days = delta.days + hours = delta.seconds // 3600 + minutes = delta.seconds // 60 + seconds = delta.seconds + + if days < 0: + return _('just now') + + if days == 0: + if hours == 0: + if minutes > 0: + return ungettext( + _('{minutes} minute ago'), + _('{minutes} minutes ago'), minutes + ).format(minutes=minutes) + else: + if include_seconds and seconds: + return ungettext( + _('{seconds} second ago'), + _('{seconds} seconds ago'), seconds + ).format(seconds=seconds) + return _('just now') + else: + return ungettext( + _('{hours} hour ago'), _('{hours} hours ago'), hours + ).format(hours=hours) + + if delta_midnight.days == 0: + return _('yesterday at {time}').format(time=date.strftime('%H:%M')) + + count = 0 + for chunk, pluralizefun in OLDER_CHUNKS: + if days >= chunk: + count = int(round((delta_midnight.days + 1) / chunk, 0)) + fmt = pluralizefun(count) + return fmt.format(num=count) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/loaders.py b/thesisenv/lib/python3.6/site-packages/djcelery/loaders.py new file mode 100644 index 0000000..b19e07a --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/loaders.py @@ -0,0 +1,202 @@ +from __future__ import absolute_import + +import os +import imp +import importlib + +from datetime import datetime +from warnings import warn + +from celery import signals +try: + from celery.utils.collections import DictAttribute +except ImportError: + from celery.datastructures import DictAttribute +from celery.loaders.base import BaseLoader + +from django import db +from django.conf import settings +from django.core import cache +from django.core.mail import mail_admins + +from .utils import DATABASE_ERRORS, now + +_RACE_PROTECTION = False + + +def _maybe_close_fd(fh): + try: + os.close(fh.fileno()) + except (AttributeError, OSError, TypeError): + # TypeError added for celery#962 + pass + + +class DjangoLoader(BaseLoader): + """The Django loader.""" + _db_reuse = 0 + + override_backends = { + 'database': 'djcelery.backends.database.DatabaseBackend', + 'cache': 'djcelery.backends.cache.CacheBackend', + } + + def __init__(self, *args, **kwargs): + super(DjangoLoader, self).__init__(*args, **kwargs) + self._install_signal_handlers() + + def _install_signal_handlers(self): + # Need to close any open database connection after + # any embedded celerybeat process forks. + signals.beat_embedded_init.connect(self.close_database) + signals.worker_ready.connect(self.warn_if_debug) + + def now(self, utc=False): + return datetime.utcnow() if utc else now() + + def read_configuration(self): + """Load configuration from Django settings.""" + self.configured = True + # Default backend needs to be the database backend for backward + # compatibility. 
+ backend = (getattr(settings, 'CELERY_RESULT_BACKEND', None) or + getattr(settings, 'CELERY_BACKEND', None)) + if not backend: + settings.CELERY_RESULT_BACKEND = 'database' + return DictAttribute(settings) + + def _close_database(self): + try: + funs = [conn.close for conn in db.connections] + except AttributeError: + if hasattr(db, 'close_old_connections'): # Django 1.6+ + funs = [db.close_old_connections] + else: + funs = [db.close_connection] # pre multidb + + for close in funs: + try: + close() + except DATABASE_ERRORS as exc: + str_exc = str(exc) + if 'closed' not in str_exc and 'not connected' not in str_exc: + raise + + def close_database(self, **kwargs): + db_reuse_max = self.conf.get('CELERY_DB_REUSE_MAX', None) + if not db_reuse_max: + return self._close_database() + if self._db_reuse >= db_reuse_max * 2: + self._db_reuse = 0 + self._close_database() + self._db_reuse += 1 + + def close_cache(self): + try: + cache.cache.close() + except (TypeError, AttributeError): + pass + + def on_process_cleanup(self): + """Does everything necessary for Django to work in a long-living, + multiprocessing environment. + + """ + # See http://groups.google.com/group/django-users/ + # browse_thread/thread/78200863d0c07c6d/ + self.close_database() + self.close_cache() + + def on_task_init(self, task_id, task): + """Called before every task.""" + try: + is_eager = task.request.is_eager + except AttributeError: + is_eager = False + if not is_eager: + self.close_database() + + def on_worker_init(self): + """Called when the worker starts. + + Automatically discovers any ``tasks.py`` files in the applications + listed in ``INSTALLED_APPS``. + + """ + self.import_default_modules() + + self.close_database() + self.close_cache() + + def warn_if_debug(self, **kwargs): + if settings.DEBUG: + warn('Using settings.DEBUG leads to a memory leak, never ' + 'use this setting in production environments!') + + def import_default_modules(self): + super(DjangoLoader, self).import_default_modules() + self.autodiscover() + + def autodiscover(self): + self.task_modules.update(mod.__name__ for mod in autodiscover() or ()) + + def on_worker_process_init(self): + # the parent process may have established these, + # so need to close them. + + # calling db.close() on some DB connections will cause + # the inherited DB conn to also get broken in the parent + # process so we need to remove it without triggering any + # network IO that close() might cause. 
+ try: + for c in db.connections.all(): + if c and c.connection: + _maybe_close_fd(c.connection) + except AttributeError: + if db.connection and db.connection.connection: + _maybe_close_fd(db.connection.connection) + + # use the _ version to avoid DB_REUSE preventing the conn.close() call + self._close_database() + self.close_cache() + + def mail_admins(self, subject, body, fail_silently=False, **kwargs): + return mail_admins(subject, body, fail_silently=fail_silently) + + +def autodiscover(): + """Include tasks for all applications in ``INSTALLED_APPS``.""" + global _RACE_PROTECTION + + if _RACE_PROTECTION: + return + _RACE_PROTECTION = True + try: + return filter(None, [find_related_module(app, 'tasks') + for app in settings.INSTALLED_APPS]) + finally: + _RACE_PROTECTION = False + + +def find_related_module(app, related_name): + """Given an application name and a module name, tries to find that + module in the application.""" + + try: + app_path = importlib.import_module(app).__path__ + except ImportError as exc: + warn('Autodiscover: Error importing %s.%s: %r' % ( + app, related_name, exc, + )) + return + except AttributeError: + return + + try: + f, _, _ = imp.find_module(related_name, app_path) + # f is returned None when app_path is a module + f and f.close() + except ImportError: + return + + return importlib.import_module('{0}.{1}'.format(app, related_name)) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/__init__.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/base.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/base.py new file mode 100644 index 0000000..c999477 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/management/base.py @@ -0,0 +1,142 @@ +from __future__ import absolute_import, unicode_literals + +import celery +import djcelery +import sys + +from django.core.management.base import BaseCommand + +from djcelery.compat import setenv + +DB_SHARED_THREAD = """\ +DatabaseWrapper objects created in a thread can only \ +be used in that same thread. The object with alias '{0}' \ +was created in thread id {1} and this is thread id {2}.\ +""" + + +def patch_thread_ident(): + # monkey patch django. + # This patch make sure that we use real threads to get the ident which + # is going to happen if we are using gevent or eventlet. 
+ # -- patch taken from gunicorn + if getattr(patch_thread_ident, 'called', False): + return + try: + from django.db.backends import BaseDatabaseWrapper, DatabaseError + + if 'validate_thread_sharing' in BaseDatabaseWrapper.__dict__: + import thread + _get_ident = thread.get_ident + + __old__init__ = BaseDatabaseWrapper.__init__ + + def _init(self, *args, **kwargs): + __old__init__(self, *args, **kwargs) + self._thread_ident = _get_ident() + + def _validate_thread_sharing(self): + if (not self.allow_thread_sharing and + self._thread_ident != _get_ident()): + raise DatabaseError( + DB_SHARED_THREAD % ( + self.alias, self._thread_ident, _get_ident()), + ) + + BaseDatabaseWrapper.__init__ = _init + BaseDatabaseWrapper.validate_thread_sharing = \ + _validate_thread_sharing + + patch_thread_ident.called = True + except ImportError: + pass + + +patch_thread_ident() + + +class CeleryCommand(BaseCommand): + options = () + if hasattr(BaseCommand, 'option_list'): + options = BaseCommand.option_list + else: + def add_arguments(self, parser): + option_typemap = { + "string": str, + "int": int, + "float": float + } + for opt in self.option_list: + option = {k: v + for k, v in opt.__dict__.items() + if v is not None} + flags = (option.get("_long_opts", []) + + option.get("_short_opts", [])) + if option.get('default') == ('NO', 'DEFAULT'): + option['default'] = None + if option.get("nargs") == 1: + del option["nargs"] + del option["_long_opts"] + del option["_short_opts"] + if "type" in option: + opttype = option["type"] + option["type"] = option_typemap.get(opttype, opttype) + parser.add_argument(*flags, **option) + + skip_opts = ['--app', '--loader', '--config', '--no-color'] + requires_system_checks = False + keep_base_opts = False + stdout, stderr = sys.stdout, sys.stderr + + def get_version(self): + return 'celery {c.__version__}\ndjango-celery {d.__version__}'.format( + c=celery, d=djcelery, + ) + + def execute(self, *args, **options): + broker = options.get('broker') + if broker: + self.set_broker(broker) + super(CeleryCommand, self).execute(*args, **options) + + def set_broker(self, broker): + setenv('CELERY_BROKER_URL', broker) + + def run_from_argv(self, argv): + self.handle_default_options(argv[2:]) + return super(CeleryCommand, self).run_from_argv(argv) + + def handle_default_options(self, argv): + acc = [] + broker = None + for i, arg in enumerate(argv): + # --settings and --pythonpath are also handled + # by BaseCommand.handle_default_options, but that is + # called with the resulting options parsed by optparse. 
+ if '--settings=' in arg: + _, settings_module = arg.split('=') + setenv('DJANGO_SETTINGS_MODULE', settings_module) + elif '--pythonpath=' in arg: + _, pythonpath = arg.split('=') + sys.path.insert(0, pythonpath) + elif '--broker=' in arg: + _, broker = arg.split('=') + elif arg == '-b': + broker = argv[i + 1] + else: + acc.append(arg) + if broker: + self.set_broker(broker) + return argv if self.keep_base_opts else acc + + def die(self, msg): + sys.stderr.write(msg) + sys.stderr.write('\n') + sys.exit() + + def _is_unwanted_option(self, option): + return option._long_opts and option._long_opts[0] in self.skip_opts + + @property + def option_list(self): + return [x for x in self.options if not self._is_unwanted_option(x)] diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/__init__.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celery.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celery.py new file mode 100644 index 0000000..6e842d7 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celery.py @@ -0,0 +1,22 @@ +from __future__ import absolute_import, unicode_literals + +from celery.bin import celery + +from djcelery.app import app +from djcelery.management.base import CeleryCommand + +base = celery.CeleryCommand(app=app) + + +class Command(CeleryCommand): + """The celery command.""" + help = 'celery commands, see celery help' + options = (CeleryCommand.options + + base.get_options() + + base.preload_options) + + def run_from_argv(self, argv): + argv = self.handle_default_options(argv) + base.execute_from_commandline( + ['{0[0]} {0[1]}'.format(argv)] + argv[2:], + ) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celerybeat.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celerybeat.py new file mode 100644 index 0000000..e4573dc --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celerybeat.py @@ -0,0 +1,24 @@ +""" + +Start the celery clock service from the Django management command. + +""" +from __future__ import absolute_import, unicode_literals + +from celery.bin import beat + +from djcelery.app import app +from djcelery.management.base import CeleryCommand + +beat = beat.beat(app=app) + + +class Command(CeleryCommand): + """Run the celery periodic task scheduler.""" + options = (CeleryCommand.options + + beat.get_options() + + beat.preload_options) + help = 'Old alias to the "celery beat" command.' + + def handle(self, *args, **options): + beat.run(*args, **options) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celerycam.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celerycam.py new file mode 100644 index 0000000..2849b44 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celerycam.py @@ -0,0 +1,26 @@ +""" + +Shortcut to the Django snapshot service. 
+ +""" +from __future__ import absolute_import, unicode_literals + +from celery.bin import events + +from djcelery.app import app +from djcelery.management.base import CeleryCommand + +ev = events.events(app=app) + + +class Command(CeleryCommand): + """Run the celery curses event viewer.""" + options = (CeleryCommand.options + + ev.get_options() + + ev.preload_options) + help = 'Takes snapshots of the clusters state to the database.' + + def handle(self, *args, **options): + """Handle the management command.""" + options['camera'] = 'djcelery.snapshot.Camera' + ev.run(*args, **options) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celeryd.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celeryd.py new file mode 100644 index 0000000..0ed4c40 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celeryd.py @@ -0,0 +1,25 @@ +""" + +Start the celery daemon from the Django management command. + +""" +from __future__ import absolute_import, unicode_literals + +from celery.bin import worker + +from djcelery.app import app +from djcelery.management.base import CeleryCommand + +worker = worker.worker(app=app) + + +class Command(CeleryCommand): + """Run the celery daemon.""" + help = 'Old alias to the "celery worker" command.' + options = (CeleryCommand.options + + worker.get_options() + + worker.preload_options) + + def handle(self, *args, **options): + worker.check_args(args) + worker.run(**options) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celeryd_detach.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celeryd_detach.py new file mode 100644 index 0000000..3f2533b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celeryd_detach.py @@ -0,0 +1,25 @@ +""" + +Start detached worker node from the Django management utility. + +""" +from __future__ import absolute_import, unicode_literals + +import os +import sys + +from celery.bin import celeryd_detach + +from djcelery.management.base import CeleryCommand + + +class Command(CeleryCommand): + """Run the celery daemon.""" + help = 'Runs a detached Celery worker node.' + options = celeryd_detach.OPTION_LIST + + def run_from_argv(self, argv): + + class detached(celeryd_detach.detached_celeryd): + execv_argv = [os.path.abspath(sys.argv[0]), 'celery', 'worker'] + detached().execute_from_commandline(argv) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celeryd_multi.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celeryd_multi.py new file mode 100644 index 0000000..a852302 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celeryd_multi.py @@ -0,0 +1,25 @@ +""" + +Utility to manage multiple worker instances. + +""" +from __future__ import absolute_import, unicode_literals + +from celery.bin import multi + +from djcelery.management.base import CeleryCommand + + +class Command(CeleryCommand): + """Run the celery daemon.""" + args = '[name1, [name2, [...]> [worker options]' + help = 'Manage multiple Celery worker nodes.' 
+ options = () + keep_base_opts = True + + def run_from_argv(self, argv): + argv = self.handle_default_options(argv) + argv.append('--cmd={0[0]} celeryd_detach'.format(argv)) + multi.MultiTool().execute_from_commandline( + ['{0[0]} {0[1]}'.format(argv)] + argv[2:], + ) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celerymon.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celerymon.py new file mode 100644 index 0000000..91317a4 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/celerymon.py @@ -0,0 +1,42 @@ +""" + +Start the celery clock service from the Django management command. + +""" +from __future__ import absolute_import, unicode_literals + +import sys + +from djcelery.app import app +from djcelery.management.base import CeleryCommand + +try: + from celerymon.bin.celerymon import MonitorCommand + mon = MonitorCommand(app=app) +except ImportError: + mon = None + +MISSING = """ +You don't have celerymon installed, please install it by running the following +command: + + $ pip install -U celerymon + +or if you're still using easy_install (shame on you!) + + $ easy_install -U celerymon +""" + + +class Command(CeleryCommand): + """Run the celery monitor.""" + options = (CeleryCommand.options + + (mon and mon.get_options() + mon.preload_options or ())) + help = 'Run the celery monitor' + + def handle(self, *args, **options): + """Handle the management command.""" + if mon is None: + sys.stderr.write(MISSING) + else: + mon.run(**options) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/djcelerymon.py b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/djcelerymon.py new file mode 100644 index 0000000..29a09fc --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/management/commands/djcelerymon.py @@ -0,0 +1,48 @@ +from __future__ import absolute_import, unicode_literals + +import sys +import threading + +from celery.bin import events + +from django.core.management.commands import runserver + +from djcelery.app import app +from djcelery.management.base import CeleryCommand + +ev = events.events(app=app) + + +class WebserverThread(threading.Thread): + + def __init__(self, addrport='', *args, **options): + threading.Thread.__init__(self) + self.addrport = addrport + self.args = args + self.options = options + + def run(self): + options = dict(self.options, use_reloader=False) + command = runserver.Command() + # see http://code.djangoproject.com/changeset/13319 + command.stdout, command.stderr = sys.stdout, sys.stderr + command.handle(self.addrport, *self.args, **options) + + +class Command(CeleryCommand): + """Run the celery curses event viewer.""" + args = '[optional port number, or ipaddr:port]' + options = (runserver.Command.option_list + + ev.get_options() + + ev.preload_options) + help = 'Starts Django Admin instance and celerycam in the same process.' + # see http://code.djangoproject.com/changeset/13319. 
+ stdout, stderr = sys.stdout, sys.stderr + + def handle(self, addrport='', *args, **options): + """Handle the management command.""" + server = WebserverThread(addrport, *args, **options) + server.start() + options['camera'] = 'djcelery.snapshot.Camera' + options['prog_name'] = 'djcelerymon' + ev.run(*args, **options) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/managers.py b/thesisenv/lib/python3.6/site-packages/djcelery/managers.py new file mode 100644 index 0000000..91dae00 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/managers.py @@ -0,0 +1,243 @@ +from __future__ import absolute_import, unicode_literals + +import warnings + +from functools import wraps +from itertools import count + +from django.db import connection +try: + from django.db import connections, router +except ImportError: # pre-Django 1.2 + connections = router = None # noqa + +from django.db import models +from django.db.models.query import QuerySet +from django.conf import settings + +try: + from celery.utils.timeutils import maybe_timedelta +except ImportError: + from celery.utils.time import maybe_timedelta + +from .db import commit_on_success, get_queryset, rollback_unless_managed +from .utils import now + + +class TxIsolationWarning(UserWarning): + pass + + +def transaction_retry(max_retries=1): + """Decorator for methods doing database operations. + + If the database operation fails, it will retry the operation + at most ``max_retries`` times. + + """ + def _outer(fun): + + @wraps(fun) + def _inner(*args, **kwargs): + _max_retries = kwargs.pop('exception_retry_count', max_retries) + for retries in count(0): + try: + return fun(*args, **kwargs) + except Exception: # pragma: no cover + # Depending on the database backend used we can experience + # various exceptions. E.g. psycopg2 raises an exception + # if some operation breaks the transaction, so saving + # the task result won't be possible until we rollback + # the transaction. 
+ if retries >= _max_retries: + raise + try: + rollback_unless_managed() + except Exception: + pass + return _inner + + return _outer + + +def update_model_with_dict(obj, fields): + [setattr(obj, attr_name, attr_value) + for attr_name, attr_value in fields.items()] + obj.save() + return obj + + +class ExtendedQuerySet(QuerySet): + + def update_or_create(self, **kwargs): + obj, created = self.get_or_create(**kwargs) + + if not created: + fields = dict(kwargs.pop('defaults', {})) + fields.update(kwargs) + update_model_with_dict(obj, fields) + + return obj, created + + +class ExtendedManager(models.Manager): + + def get_queryset(self): + return ExtendedQuerySet(self.model) + get_query_set = get_queryset # Pre django 1.6 + + def update_or_create(self, **kwargs): + return get_queryset(self).update_or_create(**kwargs) + + def connection_for_write(self): + if connections: + return connections[router.db_for_write(self.model)] + return connection + + def connection_for_read(self): + if connections: + return connections[self.db] + return connection + + def current_engine(self): + try: + return settings.DATABASES[self.db]['ENGINE'] + except AttributeError: + return settings.DATABASE_ENGINE + + +class ResultManager(ExtendedManager): + + def get_all_expired(self, expires): + """Get all expired task results.""" + return self.filter(date_done__lt=now() - maybe_timedelta(expires)) + + def delete_expired(self, expires): + """Delete all expired taskset results.""" + meta = self.model._meta + with commit_on_success(): + self.get_all_expired(expires).update(hidden=True) + cursor = self.connection_for_write().cursor() + cursor.execute( + 'DELETE FROM {0.db_table} WHERE hidden=%s'.format(meta), + (True, ), + ) + + +class PeriodicTaskManager(ExtendedManager): + + def enabled(self): + return self.filter(enabled=True) + + +class TaskManager(ResultManager): + """Manager for :class:`celery.models.Task` models.""" + _last_id = None + + def get_task(self, task_id): + """Get task meta for task by ``task_id``. + + :keyword exception_retry_count: How many times to retry by + transaction rollback on exception. This could theoretically + happen in a race condition if another worker is trying to + create the same task. The default is to retry once. + + """ + try: + return self.get(task_id=task_id) + except self.model.DoesNotExist: + if self._last_id == task_id: + self.warn_if_repeatable_read() + self._last_id = task_id + return self.model(task_id=task_id) + + @transaction_retry(max_retries=2) + def store_result(self, task_id, result, status, + traceback=None, children=None): + """Store the result and status of a task. + + :param task_id: task id + + :param result: The return value of the task, or an exception + instance raised by the task. + + :param status: Task status. See + :meth:`celery.result.AsyncResult.get_status` for a list of + possible status values. + + :keyword traceback: The traceback at the point of exception (if the + task failed). + + :keyword children: List of serialized results of subtasks + of this task. + + :keyword exception_retry_count: How many times to retry by + transaction rollback on exception. This could theoretically + happen in a race condition if another worker is trying to + create the same task. The default is to retry twice. 
+ + """ + return self.update_or_create(task_id=task_id, + defaults={'status': status, + 'result': result, + 'traceback': traceback, + 'meta': {'children': children}}) + + def warn_if_repeatable_read(self): + if 'mysql' in self.current_engine().lower(): + cursor = self.connection_for_read().cursor() + if cursor.execute('SELECT @@tx_isolation'): + isolation = cursor.fetchone()[0] + if isolation == 'REPEATABLE-READ': + warnings.warn(TxIsolationWarning( + 'Polling results with transaction isolation level ' + 'repeatable-read within the same transaction ' + 'may give outdated results. Be sure to commit the ' + 'transaction for each poll iteration.')) + + +class TaskSetManager(ResultManager): + """Manager for :class:`celery.models.TaskSet` models.""" + + def restore_taskset(self, taskset_id): + """Get the async result instance by taskset id.""" + try: + return self.get(taskset_id=taskset_id) + except self.model.DoesNotExist: + pass + + def delete_taskset(self, taskset_id): + """Delete a saved taskset result.""" + s = self.restore_taskset(taskset_id) + if s: + s.delete() + + @transaction_retry(max_retries=2) + def store_result(self, taskset_id, result): + """Store the async result instance of a taskset. + + :param taskset_id: task set id + + :param result: The return value of the taskset + + """ + return self.update_or_create(taskset_id=taskset_id, + defaults={'result': result}) + + +class TaskStateManager(ExtendedManager): + + def active(self): + return self.filter(hidden=False) + + def expired(self, states, expires, nowfun=now): + return self.filter(state__in=states, + tstamp__lte=nowfun() - maybe_timedelta(expires)) + + def expire_by_states(self, states, expires): + if expires is not None: + return self.expired(states, expires).update(hidden=True) + + def purge(self): + with commit_on_success(): + self.model.objects.filter(hidden=True).delete() diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/migrations/0001_initial.py b/thesisenv/lib/python3.6/site-packages/djcelery/migrations/0001_initial.py new file mode 100644 index 0000000..75fe231 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/migrations/0001_initial.py @@ -0,0 +1,163 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import models, migrations +import djcelery.picklefield + + +class Migration(migrations.Migration): + + dependencies = [ + ] + + operations = [ + migrations.CreateModel( + name='CrontabSchedule', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('minute', models.CharField(default='*', max_length=64, verbose_name='minute')), + ('hour', models.CharField(default='*', max_length=64, verbose_name='hour')), + ('day_of_week', models.CharField(default='*', max_length=64, verbose_name='day of week')), + ('day_of_month', models.CharField(default='*', max_length=64, verbose_name='day of month')), + ('month_of_year', models.CharField(default='*', max_length=64, verbose_name='month of year')), + ], + options={ + 'ordering': ['month_of_year', 'day_of_month', 'day_of_week', 'hour', 'minute'], + 'verbose_name': 'crontab', + 'verbose_name_plural': 'crontabs', + }, + bases=(models.Model,), + ), + migrations.CreateModel( + name='IntervalSchedule', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('every', models.IntegerField(verbose_name='every')), + ('period', models.CharField(max_length=24, verbose_name='period', choices=[('days', 'Days'), 
('hours', 'Hours'), ('minutes', 'Minutes'), ('seconds', 'Seconds'), ('microseconds', 'Microseconds')])), + ], + options={ + 'ordering': ['period', 'every'], + 'verbose_name': 'interval', + 'verbose_name_plural': 'intervals', + }, + bases=(models.Model,), + ), + migrations.CreateModel( + name='PeriodicTask', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('name', models.CharField(help_text='Useful description', unique=True, max_length=200, verbose_name='name')), + ('task', models.CharField(max_length=200, verbose_name='task name')), + ('args', models.TextField(default='[]', help_text='JSON encoded positional arguments', verbose_name='Arguments', blank=True)), + ('kwargs', models.TextField(default='{}', help_text='JSON encoded keyword arguments', verbose_name='Keyword arguments', blank=True)), + ('queue', models.CharField(default=None, max_length=200, blank=True, help_text='Queue defined in CELERY_QUEUES', null=True, verbose_name='queue')), + ('exchange', models.CharField(default=None, max_length=200, null=True, verbose_name='exchange', blank=True)), + ('routing_key', models.CharField(default=None, max_length=200, null=True, verbose_name='routing key', blank=True)), + ('expires', models.DateTimeField(null=True, verbose_name='expires', blank=True)), + ('enabled', models.BooleanField(default=True, verbose_name='enabled')), + ('last_run_at', models.DateTimeField(null=True, editable=False, blank=True)), + ('total_run_count', models.PositiveIntegerField(default=0, editable=False)), + ('date_changed', models.DateTimeField(auto_now=True)), + ('description', models.TextField(verbose_name='description', blank=True)), + ('crontab', models.ForeignKey(blank=True, to='djcelery.CrontabSchedule', help_text='Use one of interval/crontab', null=True, verbose_name='crontab', on_delete=models.CASCADE)), + ('interval', models.ForeignKey(verbose_name='interval', blank=True, to='djcelery.IntervalSchedule', null=True, on_delete=models.CASCADE)), + ], + options={ + 'verbose_name': 'periodic task', + 'verbose_name_plural': 'periodic tasks', + }, + bases=(models.Model,), + ), + migrations.CreateModel( + name='PeriodicTasks', + fields=[ + ('ident', models.SmallIntegerField(default=1, unique=True, serialize=False, primary_key=True)), + ('last_update', models.DateTimeField()), + ], + options={ + }, + bases=(models.Model,), + ), + migrations.CreateModel( + name='TaskMeta', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('task_id', models.CharField(unique=True, max_length=255, verbose_name='task id')), + ('status', models.CharField(default='PENDING', max_length=50, verbose_name='state', choices=[('FAILURE', 'FAILURE'), ('PENDING', 'PENDING'), ('RECEIVED', 'RECEIVED'), ('RETRY', 'RETRY'), ('REVOKED', 'REVOKED'), ('STARTED', 'STARTED'), ('SUCCESS', 'SUCCESS')])), + ('result', djcelery.picklefield.PickledObjectField(default=None, null=True, editable=False)), + ('date_done', models.DateTimeField(auto_now=True, verbose_name='done at')), + ('traceback', models.TextField(null=True, verbose_name='traceback', blank=True)), + ('hidden', models.BooleanField(default=False, db_index=True, editable=False)), + ('meta', djcelery.picklefield.PickledObjectField(default=None, null=True, editable=False)), + ], + options={ + 'db_table': 'celery_taskmeta', + 'verbose_name': 'task state', + 'verbose_name_plural': 'task states', + }, + bases=(models.Model,), + ), + migrations.CreateModel( + name='TaskSetMeta', + 
fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('taskset_id', models.CharField(unique=True, max_length=255, verbose_name='group id')), + ('result', djcelery.picklefield.PickledObjectField(editable=False)), + ('date_done', models.DateTimeField(auto_now=True, verbose_name='created at')), + ('hidden', models.BooleanField(default=False, db_index=True, editable=False)), + ], + options={ + 'db_table': 'celery_tasksetmeta', + 'verbose_name': 'saved group result', + 'verbose_name_plural': 'saved group results', + }, + bases=(models.Model,), + ), + migrations.CreateModel( + name='TaskState', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('state', models.CharField(db_index=True, max_length=64, verbose_name='state', choices=[('FAILURE', 'FAILURE'), ('PENDING', 'PENDING'), ('RECEIVED', 'RECEIVED'), ('RETRY', 'RETRY'), ('REVOKED', 'REVOKED'), ('STARTED', 'STARTED'), ('SUCCESS', 'SUCCESS')])), + ('task_id', models.CharField(unique=True, max_length=36, verbose_name='UUID')), + ('name', models.CharField(max_length=200, null=True, verbose_name='name', db_index=True)), + ('tstamp', models.DateTimeField(verbose_name='event received at', db_index=True)), + ('args', models.TextField(null=True, verbose_name='Arguments')), + ('kwargs', models.TextField(null=True, verbose_name='Keyword arguments')), + ('eta', models.DateTimeField(null=True, verbose_name='ETA')), + ('expires', models.DateTimeField(null=True, verbose_name='expires')), + ('result', models.TextField(null=True, verbose_name='result')), + ('traceback', models.TextField(null=True, verbose_name='traceback')), + ('runtime', models.FloatField(help_text='in seconds if task succeeded', null=True, verbose_name='execution time')), + ('retries', models.IntegerField(default=0, verbose_name='number of retries')), + ('hidden', models.BooleanField(default=False, db_index=True, editable=False)), + ], + options={ + 'ordering': ['-tstamp'], + 'get_latest_by': 'tstamp', + 'verbose_name': 'task', + 'verbose_name_plural': 'tasks', + }, + bases=(models.Model,), + ), + migrations.CreateModel( + name='WorkerState', + fields=[ + ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), + ('hostname', models.CharField(unique=True, max_length=255, verbose_name='hostname')), + ('last_heartbeat', models.DateTimeField(null=True, verbose_name='last heartbeat', db_index=True)), + ], + options={ + 'ordering': ['-last_heartbeat'], + 'get_latest_by': 'last_heartbeat', + 'verbose_name': 'worker', + 'verbose_name_plural': 'workers', + }, + bases=(models.Model,), + ), + migrations.AddField( + model_name='taskstate', + name='worker', + field=models.ForeignKey(verbose_name='worker', to='djcelery.WorkerState', null=True, on_delete=models.CASCADE), + preserve_default=True, + ), + ] diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/migrations/__init__.py b/thesisenv/lib/python3.6/site-packages/djcelery/migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/models.py b/thesisenv/lib/python3.6/site-packages/djcelery/models.py new file mode 100644 index 0000000..be90f95 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/models.py @@ -0,0 +1,381 @@ +from __future__ import absolute_import, unicode_literals + +from datetime import timedelta +from time import time, mktime, gmtime + +from django.core.exceptions import 
MultipleObjectsReturned, ValidationError +from django.db import models +from django.db.models import signals +from django.utils.translation import ugettext_lazy as _ +from django.conf import settings + +from celery import schedules +from celery import states +from celery.events.state import heartbeat_expires + +from . import managers +from .picklefield import PickledObjectField +from .utils import now +from .compat import python_2_unicode_compatible + +ALL_STATES = sorted(states.ALL_STATES) +TASK_STATE_CHOICES = sorted(zip(ALL_STATES, ALL_STATES)) + + +def cronexp(field): + return field and str(field).replace(' ', '') or '*' + + +@python_2_unicode_compatible +class TaskMeta(models.Model): + """Task result/status.""" + task_id = models.CharField(_('task id'), max_length=255, unique=True) + status = models.CharField( + _('state'), + max_length=50, default=states.PENDING, choices=TASK_STATE_CHOICES, + ) + result = PickledObjectField(null=True, default=None, editable=False) + date_done = models.DateTimeField(_('done at'), auto_now=True) + traceback = models.TextField(_('traceback'), blank=True, null=True) + hidden = models.BooleanField(editable=False, default=False, db_index=True) + # TODO compression was enabled by mistake, we need to disable it + # but this is a backwards incompatible change that needs planning. + meta = PickledObjectField( + compress=True, null=True, default=None, editable=False, + ) + + objects = managers.TaskManager() + + class Meta: + verbose_name = _('task state') + verbose_name_plural = _('task states') + db_table = 'celery_taskmeta' + + def to_dict(self): + return {'task_id': self.task_id, + 'status': self.status, + 'result': self.result, + 'date_done': self.date_done, + 'traceback': self.traceback, + 'children': (self.meta or {}).get('children')} + + def __str__(self): + return ''.format(self) + + +@python_2_unicode_compatible +class TaskSetMeta(models.Model): + """TaskSet result""" + taskset_id = models.CharField(_('group id'), max_length=255, unique=True) + result = PickledObjectField() + date_done = models.DateTimeField(_('created at'), auto_now=True) + hidden = models.BooleanField(editable=False, default=False, db_index=True) + + objects = managers.TaskSetManager() + + class Meta: + """Model meta-data.""" + verbose_name = _('saved group result') + verbose_name_plural = _('saved group results') + db_table = 'celery_tasksetmeta' + + def to_dict(self): + return {'taskset_id': self.taskset_id, + 'result': self.result, + 'date_done': self.date_done} + + def __str__(self): + return ''.format(self) + + +PERIOD_CHOICES = (('days', _('Days')), + ('hours', _('Hours')), + ('minutes', _('Minutes')), + ('seconds', _('Seconds')), + ('microseconds', _('Microseconds'))) + + +@python_2_unicode_compatible +class IntervalSchedule(models.Model): + every = models.IntegerField(_('every'), null=False) + period = models.CharField( + _('period'), max_length=24, choices=PERIOD_CHOICES, + ) + + class Meta: + verbose_name = _('interval') + verbose_name_plural = _('intervals') + ordering = ['period', 'every'] + + @property + def schedule(self): + return schedules.schedule(timedelta(**{self.period: self.every})) + + @classmethod + def from_schedule(cls, schedule, period='seconds'): + every = max(schedule.run_every.total_seconds(), 0) + try: + return cls.objects.get(every=every, period=period) + except cls.DoesNotExist: + return cls(every=every, period=period) + except MultipleObjectsReturned: + cls.objects.filter(every=every, period=period).delete() + return cls(every=every, period=period) + 
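# Usage sketch (illustrative values only, not part of the vendored module):
#
#   IntervalSchedule(every=30, period='seconds').schedule
#       -> schedules.schedule(timedelta(seconds=30))
#
#   IntervalSchedule.from_schedule(schedules.schedule(timedelta(minutes=5)))
#       -> reuses (or creates) the row with every=300, period='seconds'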
+ def __str__(self): + if self.every == 1: + return _('every {0.period_singular}').format(self) + return _('every {0.every:d} {0.period}').format(self) + + @property + def period_singular(self): + return self.period[:-1] + + +@python_2_unicode_compatible +class CrontabSchedule(models.Model): + minute = models.CharField(_('minute'), max_length=64, default='*') + hour = models.CharField(_('hour'), max_length=64, default='*') + day_of_week = models.CharField( + _('day of week'), max_length=64, default='*', + ) + day_of_month = models.CharField( + _('day of month'), max_length=64, default='*', + ) + month_of_year = models.CharField( + _('month of year'), max_length=64, default='*', + ) + + class Meta: + verbose_name = _('crontab') + verbose_name_plural = _('crontabs') + ordering = ['month_of_year', 'day_of_month', + 'day_of_week', 'hour', 'minute'] + + def __str__(self): + return '{0} {1} {2} {3} {4} (m/h/d/dM/MY)'.format( + cronexp(self.minute), + cronexp(self.hour), + cronexp(self.day_of_week), + cronexp(self.day_of_month), + cronexp(self.month_of_year), + ) + + @property + def schedule(self): + return schedules.crontab(minute=self.minute, + hour=self.hour, + day_of_week=self.day_of_week, + day_of_month=self.day_of_month, + month_of_year=self.month_of_year) + + @classmethod + def from_schedule(cls, schedule): + spec = {'minute': schedule._orig_minute, + 'hour': schedule._orig_hour, + 'day_of_week': schedule._orig_day_of_week, + 'day_of_month': schedule._orig_day_of_month, + 'month_of_year': schedule._orig_month_of_year} + try: + return cls.objects.get(**spec) + except cls.DoesNotExist: + return cls(**spec) + except MultipleObjectsReturned: + cls.objects.filter(**spec).delete() + return cls(**spec) + + +class PeriodicTasks(models.Model): + ident = models.SmallIntegerField(default=1, primary_key=True, unique=True) + last_update = models.DateTimeField(null=False) + + objects = managers.ExtendedManager() + + @classmethod + def changed(cls, instance, **kwargs): + if not instance.no_changes: + cls.objects.update_or_create(ident=1, + defaults={'last_update': now()}) + + @classmethod + def last_change(cls): + try: + return cls.objects.get(ident=1).last_update + except cls.DoesNotExist: + pass + + +@python_2_unicode_compatible +class PeriodicTask(models.Model): + name = models.CharField( + _('name'), max_length=200, unique=True, + help_text=_('Useful description'), + ) + task = models.CharField(_('task name'), max_length=200) + interval = models.ForeignKey( + IntervalSchedule, + null=True, blank=True, verbose_name=_('interval'), + on_delete=models.CASCADE, + ) + crontab = models.ForeignKey( + CrontabSchedule, null=True, blank=True, verbose_name=_('crontab'), + on_delete=models.CASCADE, + help_text=_('Use one of interval/crontab'), + ) + args = models.TextField( + _('Arguments'), blank=True, default='[]', + help_text=_('JSON encoded positional arguments'), + ) + kwargs = models.TextField( + _('Keyword arguments'), blank=True, default='{}', + help_text=_('JSON encoded keyword arguments'), + ) + queue = models.CharField( + _('queue'), max_length=200, blank=True, null=True, default=None, + help_text=_('Queue defined in CELERY_QUEUES'), + ) + exchange = models.CharField( + _('exchange'), max_length=200, blank=True, null=True, default=None, + ) + routing_key = models.CharField( + _('routing key'), max_length=200, blank=True, null=True, default=None, + ) + expires = models.DateTimeField( + _('expires'), blank=True, null=True, + ) + enabled = models.BooleanField( + _('enabled'), default=True, + ) + 
last_run_at = models.DateTimeField( + auto_now=False, auto_now_add=False, + editable=False, blank=True, null=True, + ) + total_run_count = models.PositiveIntegerField( + default=0, editable=False, + ) + date_changed = models.DateTimeField(auto_now=True) + description = models.TextField(_('description'), blank=True) + + objects = managers.PeriodicTaskManager() + no_changes = False + + class Meta: + verbose_name = _('periodic task') + verbose_name_plural = _('periodic tasks') + + def validate_unique(self, *args, **kwargs): + super(PeriodicTask, self).validate_unique(*args, **kwargs) + if not self.interval and not self.crontab: + raise ValidationError( + {'interval': ['One of interval or crontab must be set.']}) + if self.interval and self.crontab: + raise ValidationError( + {'crontab': ['Only one of interval or crontab must be set']}) + + def save(self, *args, **kwargs): + self.exchange = self.exchange or None + self.routing_key = self.routing_key or None + self.queue = self.queue or None + if not self.enabled: + self.last_run_at = None + super(PeriodicTask, self).save(*args, **kwargs) + + def __str__(self): + fmt = '{0.name}: {{no schedule}}' + if self.interval: + fmt = '{0.name}: {0.interval}' + if self.crontab: + fmt = '{0.name}: {0.crontab}' + return fmt.format(self) + + @property + def schedule(self): + if self.interval: + return self.interval.schedule + if self.crontab: + return self.crontab.schedule + + +signals.pre_delete.connect(PeriodicTasks.changed, sender=PeriodicTask) +signals.pre_save.connect(PeriodicTasks.changed, sender=PeriodicTask) + + +class WorkerState(models.Model): + hostname = models.CharField(_('hostname'), max_length=255, unique=True) + last_heartbeat = models.DateTimeField(_('last heartbeat'), null=True, + db_index=True) + + objects = managers.ExtendedManager() + + class Meta: + """Model meta-data.""" + verbose_name = _('worker') + verbose_name_plural = _('workers') + get_latest_by = 'last_heartbeat' + ordering = ['-last_heartbeat'] + + def __str__(self): + return self.hostname + + def __repr__(self): + return ''.format(self) + + def is_alive(self): + if self.last_heartbeat: + # Use UTC timestamp if USE_TZ is true, or else use local timestamp + timestamp = mktime(gmtime()) if settings.USE_TZ else time() + return timestamp < heartbeat_expires(self.heartbeat_timestamp) + return False + + @property + def heartbeat_timestamp(self): + return mktime(self.last_heartbeat.timetuple()) + + +@python_2_unicode_compatible +class TaskState(models.Model): + state = models.CharField( + _('state'), max_length=64, choices=TASK_STATE_CHOICES, db_index=True, + ) + task_id = models.CharField(_('UUID'), max_length=36, unique=True) + name = models.CharField( + _('name'), max_length=200, null=True, db_index=True, + ) + tstamp = models.DateTimeField(_('event received at'), db_index=True) + args = models.TextField(_('Arguments'), null=True) + kwargs = models.TextField(_('Keyword arguments'), null=True) + eta = models.DateTimeField(_('ETA'), null=True) + expires = models.DateTimeField(_('expires'), null=True) + result = models.TextField(_('result'), null=True) + traceback = models.TextField(_('traceback'), null=True) + runtime = models.FloatField( + _('execution time'), null=True, + help_text=_('in seconds if task succeeded'), + ) + retries = models.IntegerField(_('number of retries'), default=0) + worker = models.ForeignKey( + WorkerState, null=True, verbose_name=_('worker'), + on_delete=models.CASCADE, + ) + hidden = models.BooleanField(editable=False, default=False, db_index=True) + + 
objects = managers.TaskStateManager() + + class Meta: + """Model meta-data.""" + verbose_name = _('task') + verbose_name_plural = _('tasks') + get_latest_by = 'tstamp' + ordering = ['-tstamp'] + + def __str__(self): + name = self.name or 'UNKNOWN' + s = '{0.state:<10} {0.task_id:<36} {1}'.format(self, name) + if self.eta: + s += ' eta:{0.eta}'.format(self) + return s + + def __repr__(self): + return ''.format( + self, self.name or 'UNKNOWN', + ) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/mon.py b/thesisenv/lib/python3.6/site-packages/djcelery/mon.py new file mode 100644 index 0000000..860e07a --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/mon.py @@ -0,0 +1,77 @@ +from __future__ import absolute_import, unicode_literals + +import os +import sys +import types + +from celery.app.defaults import strtobool +from celery.utils import import_from_cwd + +from djcelery.compat import setenv + +DEFAULT_APPS = ('django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.admin', + 'django.contrib.admindocs', + 'djcelery', + ) + +DEFAULTS = {'ROOT_URLCONF': 'djcelery.monproj.urls', + 'DATABASE_ENGINE': 'sqlite3', + 'DATABASE_NAME': 'djcelerymon.db', + 'DATABASES': {'default': { + 'ENGINE': 'django.db.backends.sqlite3', + 'NAME': 'djcelerymon.db'}}, + 'BROKER_URL': 'amqp://', + 'SITE_ID': 1, + 'INSTALLED_APPS': DEFAULT_APPS, + 'DEBUG': strtobool(os.environ.get('DJCELERYMON_DEBUG', '0')) + } + + +def default_settings(name='__default_settings__'): + c = type(name, (types.ModuleType, ), DEFAULTS)(name) + c.__dict__.update({'__file__': __file__}) + sys.modules[name] = c + return name + + +def configure(): + from celery import current_app + from celery.loaders.default import DEFAULT_CONFIG_MODULE + from django.conf import settings + + app = current_app + conf = {} + + if not settings.configured: + if 'loader' in app.__dict__ and app.loader.configured: + conf = current_app.loader.conf + else: + os.environ.pop('CELERY_LOADER', None) + settings_module = os.environ.get('CELERY_CONFIG_MODULE', + DEFAULT_CONFIG_MODULE) + try: + import_from_cwd(settings_module) + except ImportError: + settings_module = default_settings() + settings.configure(SETTINGS_MODULE=settings_module, + **dict(DEFAULTS, **conf)) + + +def run_monitor(argv): + from .management.commands import djcelerymon + djcelerymon.Command().run_from_argv([argv[0], 'djcelerymon'] + argv[1:]) + + +def main(argv=sys.argv): + from django.core import management + setenv('CELERY_LOADER', 'default') + configure() + management.call_command('migrate') + run_monitor(argv) + + +if __name__ == '__main__': + main() diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/monproj/__init__.py b/thesisenv/lib/python3.6/site-packages/djcelery/monproj/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/monproj/urls.py b/thesisenv/lib/python3.6/site-packages/djcelery/monproj/urls.py new file mode 100644 index 0000000..d3d95f6 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/monproj/urls.py @@ -0,0 +1,16 @@ +from __future__ import absolute_import, unicode_literals + +from django.conf.urls import include, url +from django.contrib import admin + + +urlpatterns = [ + # Uncomment the admin/doc line below and add 'django.contrib.admindocs' + # to INSTALLED_APPS to enable admin documentation: + url( + r'^doc/', + include('django.contrib.admindocs.urls') + ), + + url(r'', include(admin.site.urls)), +] diff --git 
a/thesisenv/lib/python3.6/site-packages/djcelery/picklefield.py b/thesisenv/lib/python3.6/site-packages/djcelery/picklefield.py new file mode 100644 index 0000000..a7dc5bd --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/picklefield.py @@ -0,0 +1,128 @@ +""" + Based on django-picklefield which is + Copyright (c) 2009-2010 Gintautas Miliauskas + but some improvements including not deepcopying values. + + Provides an implementation of a pickled object field. + Such fields can contain any picklable objects. + + The implementation is taken and adopted from Django snippet #1694 + by Taavi Taijala, + which is in turn based on Django snippet #513 + by Oliver Beattie. + +""" +from __future__ import absolute_import, unicode_literals + +import django + +from base64 import b64encode, b64decode +from zlib import compress, decompress + +from celery.five import with_metaclass +from celery.utils.serialization import pickle +from kombu.utils.encoding import bytes_to_str, str_to_bytes + +from django.db import models + +try: + from django.utils.encoding import force_text +except ImportError: + from django.utils.encoding import force_unicode as force_text # noqa + +DEFAULT_PROTOCOL = 2 + +NO_DECOMPRESS_HEADER = b'\x1e\x00r8d9qwwerwhA@' + + +if django.VERSION >= (1, 8): + BaseField = models.Field +else: + @with_metaclass(models.SubfieldBase, skip_attrs=set([ + 'db_type', + 'get_db_prep_save' + ])) + class BaseField(models.Field): # noqa + pass + + +class PickledObject(str): + pass + + +def maybe_compress(value, do_compress=False): + if do_compress: + return compress(str_to_bytes(value)) + return value + + +def maybe_decompress(value, do_decompress=False): + if do_decompress: + if str_to_bytes(value[:15]) != NO_DECOMPRESS_HEADER: + return decompress(str_to_bytes(value)) + return value + + +def encode(value, compress_object=False, pickle_protocol=DEFAULT_PROTOCOL): + return bytes_to_str(b64encode(maybe_compress( + pickle.dumps(value, pickle_protocol), compress_object), + )) + + +def decode(value, compress_object=False): + return pickle.loads(maybe_decompress(b64decode(value), compress_object)) + + +class PickledObjectField(BaseField): + + def __init__(self, compress=False, protocol=DEFAULT_PROTOCOL, + *args, **kwargs): + self.compress = compress + self.protocol = protocol + kwargs.setdefault('editable', False) + super(PickledObjectField, self).__init__(*args, **kwargs) + + def get_default(self): + if self.has_default(): + return self.default() if callable(self.default) else self.default + return super(PickledObjectField, self).get_default() + + def to_python(self, value): + if value is not None: + try: + return decode(value, self.compress) + except Exception: + if isinstance(value, PickledObject): + raise + return value + + def from_db_value(self, value, expression, connection, context): + return self.to_python(value) + + def get_db_prep_value(self, value, **kwargs): + if value is not None and not isinstance(value, PickledObject): + return force_text(encode(value, self.compress, self.protocol)) + return value + + def value_to_string(self, obj): + return self.get_db_prep_value(self._get_val_from_obj(obj)) + + def get_internal_type(self): + return 'TextField' + + def get_db_prep_lookup(self, lookup_type, value, *args, **kwargs): + if lookup_type not in ['exact', 'in', 'isnull']: + raise TypeError( + 'Lookup type {0} is not supported.'.format(lookup_type)) + return super(PickledObjectField, self) \ + .get_db_prep_lookup(*args, **kwargs) + + +try: + from south.modelsinspector import 
add_introspection_rules +except ImportError: + pass +else: + add_introspection_rules( + [], [r'^djcelery\.picklefield\.PickledObjectField'], + ) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/schedulers.py b/thesisenv/lib/python3.6/site-packages/djcelery/schedulers.py new file mode 100644 index 0000000..29d32b9 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/schedulers.py @@ -0,0 +1,282 @@ +from __future__ import absolute_import + +import logging + +from multiprocessing.util import Finalize + +from anyjson import loads, dumps +from celery import current_app +from celery import schedules +from celery.beat import Scheduler, ScheduleEntry +from celery.utils.encoding import safe_str, safe_repr +from celery.utils.log import get_logger + +try: + from celery.utils.timeutils import is_naive +except ImportError: + from celery.utils.time import is_naive + +from django.db import transaction +from django.core.exceptions import ObjectDoesNotExist + +from .db import commit_on_success +from .models import (PeriodicTask, PeriodicTasks, + CrontabSchedule, IntervalSchedule) +from .utils import DATABASE_ERRORS, make_aware +from .compat import itervalues + +# This scheduler must wake up more frequently than the +# regular of 5 minutes because it needs to take external +# changes to the schedule into account. +DEFAULT_MAX_INTERVAL = 5 # seconds + +ADD_ENTRY_ERROR = """\ +Couldn't add entry %r to database schedule: %r. Contents: %r +""" + +logger = get_logger(__name__) +debug, info, error = logger.debug, logger.info, logger.error + + +class ModelEntry(ScheduleEntry): + model_schedules = ((schedules.crontab, CrontabSchedule, 'crontab'), + (schedules.schedule, IntervalSchedule, 'interval')) + save_fields = ['last_run_at', 'total_run_count', 'no_changes'] + + def __init__(self, model): + self.app = current_app._get_current_object() + self.name = model.name + self.task = model.task + try: + self.schedule = model.schedule + except model.DoesNotExist: + logger.error('Schedule was removed from database') + logger.warning('Disabling %s', self.name) + self._disable(model) + try: + self.args = loads(model.args or '[]') + self.kwargs = loads(model.kwargs or '{}') + except ValueError: + logging.error('Failed to serialize arguments for %s.', self.name, + exc_info=1) + logging.warning('Disabling %s', self.name) + self._disable(model) + + self.options = {'queue': model.queue, + 'exchange': model.exchange, + 'routing_key': model.routing_key, + 'expires': model.expires} + self.total_run_count = model.total_run_count + self.model = model + + if not model.last_run_at: + model.last_run_at = self._default_now() + orig = self.last_run_at = model.last_run_at + if not is_naive(self.last_run_at): + self.last_run_at = self.last_run_at.replace(tzinfo=None) + assert orig.hour == self.last_run_at.hour # timezone sanity + + def _disable(self, model): + model.no_changes = True + model.enabled = False + model.save() + + def is_due(self): + if not self.model.enabled: + return False, 5.0 # 5 second delay for re-enable. + return self.schedule.is_due(self.last_run_at) + + def _default_now(self): + return self.app.now() + + def __next__(self): + self.model.last_run_at = self.app.now() + self.model.total_run_count += 1 + self.model.no_changes = True + return self.__class__(self.model) + next = __next__ # for 2to3 + + def save(self): + # Object may not be synchronized, so only + # change the fields we care about. 
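# (The row is re-fetched below and only the attributes listed in
#  save_fields -- last_run_at, total_run_count, no_changes -- are copied
#  onto it, so concurrent edits made in the admin are not overwritten.)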
+ obj = type(self.model)._default_manager.get(pk=self.model.pk) + for field in self.save_fields: + setattr(obj, field, getattr(self.model, field)) + obj.last_run_at = make_aware(obj.last_run_at) + obj.save() + + @classmethod + def to_model_schedule(cls, schedule): + for schedule_type, model_type, model_field in cls.model_schedules: + schedule = schedules.maybe_schedule(schedule) + if isinstance(schedule, schedule_type): + model_schedule = model_type.from_schedule(schedule) + model_schedule.save() + return model_schedule, model_field + raise ValueError( + 'Cannot convert schedule type {0!r} to model'.format(schedule)) + + @classmethod + def from_entry(cls, name, skip_fields=('relative', 'options'), **entry): + options = entry.get('options') or {} + fields = dict(entry) + for skip_field in skip_fields: + fields.pop(skip_field, None) + schedule = fields.pop('schedule') + model_schedule, model_field = cls.to_model_schedule(schedule) + + # reset schedule + for t in cls.model_schedules: + fields[t[2]] = None + + fields[model_field] = model_schedule + fields['args'] = dumps(fields.get('args') or []) + fields['kwargs'] = dumps(fields.get('kwargs') or {}) + fields['queue'] = options.get('queue') + fields['exchange'] = options.get('exchange') + fields['routing_key'] = options.get('routing_key') + obj, _ = PeriodicTask._default_manager.update_or_create( + name=name, defaults=fields, + ) + return cls(obj) + + def __repr__(self): + return ''.format( + safe_str(self.name), self.task, safe_repr(self.args), + safe_repr(self.kwargs), self.schedule, + ) + + +class DatabaseScheduler(Scheduler): + Entry = ModelEntry + Model = PeriodicTask + Changes = PeriodicTasks + _schedule = None + _last_timestamp = None + _initial_read = False + + def __init__(self, *args, **kwargs): + self._dirty = set() + self._finalize = Finalize(self, self.sync, exitpriority=5) + Scheduler.__init__(self, *args, **kwargs) + self.max_interval = ( + kwargs.get('max_interval') or + self.app.conf.CELERYBEAT_MAX_LOOP_INTERVAL or + DEFAULT_MAX_INTERVAL) + + def setup_schedule(self): + self.install_default_entries(self.schedule) + self.update_from_dict(self.app.conf.CELERYBEAT_SCHEDULE) + + def all_as_schedule(self): + debug('DatabaseScheduler: Fetching database schedule') + s = {} + for model in self.Model.objects.enabled(): + try: + s[model.name] = self.Entry(model) + except ValueError: + pass + return s + + def schedule_changed(self): + try: + # If MySQL is running with transaction isolation level + # REPEATABLE-READ (default), then we won't see changes done by + # other transactions until the current transaction is + # committed (Issue #41). + try: + transaction.commit() + except transaction.TransactionManagementError: + pass # not in transaction management. + + last, ts = self._last_timestamp, self.Changes.last_change() + except DATABASE_ERRORS as exc: + # Close the connection when it is broken + transaction.get_connection().close_if_unusable_or_obsolete() + error('Database gave error: %r', exc, exc_info=1) + return False + try: + if ts and ts > (last if last else ts): + return True + finally: + self._last_timestamp = ts + return False + + def reserve(self, entry): + new_entry = Scheduler.reserve(self, entry) + # Need to store entry by name, because the entry may change + # in the mean time. 
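# (sync() later looks each dirty name up in self.schedule again and
#  persists only that entry's run counters via ModelEntry.save().)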
+ self._dirty.add(new_entry.name) + return new_entry + + def sync(self): + info('Writing entries (%s)...', len(self._dirty)) + _tried = set() + try: + with commit_on_success(): + while self._dirty: + try: + name = self._dirty.pop() + _tried.add(name) + self.schedule[name].save() + except (KeyError, ObjectDoesNotExist): + pass + except DATABASE_ERRORS as exc: + # retry later + self._dirty |= _tried + error('Database error while sync: %r', exc, exc_info=1) + + def update_from_dict(self, dict_): + s = {} + for name, entry in dict_.items(): + try: + s[name] = self.Entry.from_entry(name, **entry) + except Exception as exc: + error(ADD_ENTRY_ERROR, name, exc, entry) + self.schedule.update(s) + + def install_default_entries(self, data): + entries = {} + if self.app.conf.CELERY_TASK_RESULT_EXPIRES: + entries.setdefault( + 'celery.backend_cleanup', { + 'task': 'celery.backend_cleanup', + 'schedule': schedules.crontab('0', '4', '*'), + 'options': {'expires': 12 * 3600}, + }, + ) + self.update_from_dict(entries) + + @property + def schedule(self): + update = False + if not self._initial_read: + debug('DatabaseScheduler: intial read') + update = True + self._initial_read = True + elif self.schedule_changed(): + info('DatabaseScheduler: Schedule changed.') + update = True + + if update: + self.sync() + self._schedule = self.all_as_schedule() + if logger.isEnabledFor(logging.DEBUG): + debug('Current schedule:\n%s', '\n'.join( + repr(entry) for entry in itervalues(self._schedule)), + ) + return self._schedule + + @classmethod + def create_or_update_task(cls, name, **schedule_dict): + if 'schedule' not in schedule_dict: + try: + schedule_dict['schedule'] = \ + PeriodicTask._default_manager.get(name=name).schedule + except PeriodicTask.DoesNotExist: + pass + cls.Entry.from_entry(name, **schedule_dict) + + @classmethod + def delete_task(cls, name): + PeriodicTask._default_manager.get(name=name).delete() diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/snapshot.py b/thesisenv/lib/python3.6/site-packages/djcelery/snapshot.py new file mode 100644 index 0000000..39d8af8 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/snapshot.py @@ -0,0 +1,143 @@ +from __future__ import absolute_import, unicode_literals + +from collections import defaultdict +from datetime import timedelta + +from django.conf import settings + +from celery import states +from celery.events.state import Task +from celery.events.snapshot import Polaroid +from celery.five import monotonic +from celery.utils.log import get_logger + +try: + from celery.utils.timeutils import maybe_iso8601 +except ImportError: + from celery.utils.time import maybe_iso8601 + +from .models import WorkerState, TaskState +from .utils import fromtimestamp, correct_awareness + +WORKER_UPDATE_FREQ = 60 # limit worker timestamp write freq. +SUCCESS_STATES = frozenset([states.SUCCESS]) + +# Expiry can be timedelta or None for never expire. 
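# A project can tune these retention windows from its own settings.py;
# the values below are purely illustrative:
#
#   CELERYCAM_EXPIRE_SUCCESS = timedelta(hours=6)
#   CELERYCAM_EXPIRE_ERROR = None        # never expire failed task states
#   CELERYCAM_EXPIRE_PENDING = timedelta(days=1)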
+EXPIRE_SUCCESS = getattr(settings, 'CELERYCAM_EXPIRE_SUCCESS', + timedelta(days=1)) +EXPIRE_ERROR = getattr(settings, 'CELERYCAM_EXPIRE_ERROR', + timedelta(days=3)) +EXPIRE_PENDING = getattr(settings, 'CELERYCAM_EXPIRE_PENDING', + timedelta(days=5)) +NOT_SAVED_ATTRIBUTES = frozenset(['name', 'args', 'kwargs', 'eta']) + +logger = get_logger(__name__) +debug = logger.debug + + +class Camera(Polaroid): + TaskState = TaskState + WorkerState = WorkerState + + clear_after = True + worker_update_freq = WORKER_UPDATE_FREQ + expire_states = { + SUCCESS_STATES: EXPIRE_SUCCESS, + states.EXCEPTION_STATES: EXPIRE_ERROR, + states.UNREADY_STATES: EXPIRE_PENDING, + } + + def __init__(self, *args, **kwargs): + super(Camera, self).__init__(*args, **kwargs) + self._last_worker_write = defaultdict(lambda: (None, None)) + + def get_heartbeat(self, worker): + try: + heartbeat = worker.heartbeats[-1] + except IndexError: + return + return fromtimestamp(heartbeat) + + def handle_worker(self, hostname_worker): + (hostname, worker) = hostname_worker + last_write, obj = self._last_worker_write[hostname] + if not last_write or \ + monotonic() - last_write > self.worker_update_freq: + obj, _ = self.WorkerState.objects.update_or_create( + hostname=hostname, + defaults={'last_heartbeat': self.get_heartbeat(worker)}, + ) + self._last_worker_write[hostname] = (monotonic(), obj) + return obj + + def handle_task(self, uuid_task, worker=None): + """Handle snapshotted event.""" + uuid, task = uuid_task + if task.worker and task.worker.hostname: + worker = self.handle_worker( + (task.worker.hostname, task.worker), + ) + + defaults = { + 'name': task.name, + 'args': task.args, + 'kwargs': task.kwargs, + 'eta': correct_awareness(maybe_iso8601(task.eta)), + 'expires': correct_awareness(maybe_iso8601(task.expires)), + 'state': task.state, + 'tstamp': fromtimestamp(task.timestamp), + 'result': task.result or task.exception, + 'traceback': task.traceback, + 'runtime': task.runtime, + 'worker': worker + } + # Some fields are only stored in the RECEIVED event, + # so we should remove these from default values, + # so that they are not overwritten by subsequent states. 
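# (Subsequent SUCCESS/FAILURE events carry no name/args/kwargs/eta, so
#  the None values are dropped below to keep what was recorded when the
#  task was RECEIVED.)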
+ [defaults.pop(attr, None) for attr in NOT_SAVED_ATTRIBUTES + if defaults[attr] is None] + return self.update_task(task.state, + task_id=uuid, defaults=defaults) + + def update_task(self, state, **kwargs): + objects = self.TaskState.objects + defaults = kwargs.pop('defaults', None) or {} + if not defaults.get('name'): + return + obj, created = objects.get_or_create(defaults=defaults, **kwargs) + if created: + return obj + else: + if states.state(state) < states.state(obj.state): + keep = Task.merge_rules[states.RECEIVED] + defaults = dict( + (k, v) for k, v in defaults.items() + if k not in keep + ) + + for k, v in defaults.items(): + setattr(obj, k, v) + obj.save() + + return obj + + def on_shutter(self, state, commit_every=100): + + def _handle_tasks(): + for i, task in enumerate(state.tasks.items()): + self.handle_task(task) + + for worker in state.workers.items(): + self.handle_worker(worker) + _handle_tasks() + + def on_cleanup(self): + expired = (self.TaskState.objects.expire_by_states(states, expires) + for states, expires in self.expire_states.items()) + dirty = sum(item for item in expired if item is not None) + if dirty: + debug('Cleanup: Marked %s objects as dirty.', dirty) + self.TaskState.objects.purge() + debug('Cleanup: %s objects purged.', dirty) + return dirty + return 0 diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/static/djcelery/style.css b/thesisenv/lib/python3.6/site-packages/djcelery/static/djcelery/style.css new file mode 100644 index 0000000..b4f4c6a --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/static/djcelery/style.css @@ -0,0 +1,4 @@ +.form-row.field-traceback p { + font-family: monospace; + white-space: pre; +} diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/templates/admin/djcelery/change_list.html b/thesisenv/lib/python3.6/site-packages/djcelery/templates/admin/djcelery/change_list.html new file mode 100644 index 0000000..20b269f --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/templates/admin/djcelery/change_list.html @@ -0,0 +1,20 @@ +{% extends "admin/change_list.html" %} +{% load i18n %} + +{% block breadcrumbs %} + + {% if wrong_scheduler %} +
+    <ul class="messagelist">
+        <li class="warning">
+            Periodic tasks won't be dispatched unless you set the
+            CELERYBEAT_SCHEDULER setting to
+            djcelery.schedulers.DatabaseScheduler,
+            or specify it using the -S option to celerybeat
+        </li>
+    </ul>
+ {% endif %} +{% endblock %} diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/templates/djcelery/confirm_rate_limit.html b/thesisenv/lib/python3.6/site-packages/djcelery/templates/djcelery/confirm_rate_limit.html new file mode 100644 index 0000000..6152b76 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/templates/djcelery/confirm_rate_limit.html @@ -0,0 +1,25 @@ +{% extends "admin/base_site.html" %} +{% load i18n %} + +{% block breadcrumbs %} + +{% endblock %} + +{% block content %} +
{% csrf_token %} +
+ {% for obj in queryset %} + + {% endfor %} + + + + +
+
+{% endblock %} diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/__init__.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/_compat.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/_compat.py new file mode 100644 index 0000000..4969b5c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/tests/_compat.py @@ -0,0 +1,6 @@ +# coding: utf-8 + +try: + from unittest.mock import patch +except ImportError: + from mock import patch # noqa diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/req.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/req.py new file mode 100644 index 0000000..01a99fb --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/tests/req.py @@ -0,0 +1,76 @@ +from __future__ import absolute_import, unicode_literals + +from django.test import Client +from django.core.handlers.wsgi import WSGIRequest +from django.core.handlers.base import BaseHandler + +from celery.utils.compat import WhateverIO + +from djcelery.compat import unicode + + +class RequestFactory(Client): + """Class that lets you create mock Request objects for use in testing. + + Usage: + + rf = RequestFactory() + get_request = rf.get('/hello/') + post_request = rf.post('/submit/', {'foo': 'bar'}) + + This class re-uses the django.test.client.Client interface, docs here: + http://www.djangoproject.com/documentation/testing/#the-test-client + + Once you have a request object you can pass it to any view function, + just as if that view had been hooked up using a URLconf. + + """ + + def request(self, **request): + """Similar to parent class, but returns the request object as + soon as it has created it.""" + environ = { + 'HTTP_COOKIE': unicode(self.cookies), + 'HTTP_USER_AGENT': 'Django UnitTest Client 1.0', + 'REMOTE_ADDR': '127.0.0.1', + 'PATH_INFO': '/', + 'QUERY_STRING': '', + 'REQUEST_METHOD': 'GET', + 'SCRIPT_NAME': '', + 'SERVER_NAME': 'testserver', + 'SERVER_PORT': 80, + 'SERVER_PROTOCOL': 'HTTP/1.1', + 'wsgi.input': WhateverIO(), + } + + environ.update(self.defaults) + environ.update(request) + return WSGIRequest(environ) + + +class MockRequest(object): + + def __init__(self): + handler = BaseHandler() + handler.load_middleware() + self.request_factory = RequestFactory() + self.middleware = handler._request_middleware + + def _make_request(self, request_method, *args, **kwargs): + request_method_handler = getattr(self.request_factory, request_method) + request = request_method_handler(*args, **kwargs) + [middleware_processor(request) + for middleware_processor in self.middleware] + return request + + def get(self, *args, **kwargs): + return self._make_request('get', *args, **kwargs) + + def post(self, *args, **kwargs): + return self._make_request('post', *args, **kwargs) + + def put(self, *args, **kwargs): + return self._make_request('put', *args, **kwargs) + + def delete(self, *args, **kwargs): + return self._make_request('delete', *args, **kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_admin.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_admin.py new file mode 100644 index 0000000..d4238ee --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_admin.py @@ -0,0 +1,86 @@ +from __future__ import unicode_literals + +from django.contrib import admin +from django.test import RequestFactory, TestCase + +from djcelery.admin import 
PeriodicTaskAdmin +from djcelery.models import ( + PeriodicTask, IntervalSchedule, PERIOD_CHOICES, PeriodicTasks +) + + +class MockRequest(object): + pass + + +request = MockRequest() + +site = admin.AdminSite() + + +class TestPeriodicTaskAdmin(TestCase): + @classmethod + def setUpTestData(cls): + cls.interval = IntervalSchedule.objects.create( + every=1, period=PERIOD_CHOICES[0][0]) + + cls.request_factory = RequestFactory() + + cls.pt_admin = PeriodicTaskAdmin(PeriodicTask, site) + + def test_specified_ordering(self): + """ + Ordering should be by ('-enabled', 'name') + """ + PeriodicTask.objects.bulk_create([ + PeriodicTask(name='Bohemian Rhapsody', task='bohemian_rhapsody', + interval=self.interval, enabled=True), + PeriodicTask(name='Somebody to Love', task='somebody_to_love', + interval=self.interval, enabled=False), + PeriodicTask(name='Tie Your Mother Down', + task='tie_your_mother_down', + interval=self.interval, enabled=False), + PeriodicTask(name='Under Pressure', task='under_pressure', + interval=self.interval, enabled=True), + ]) + names = [b.name for b in self.pt_admin.get_queryset(request)] + self.assertListEqual(['Bohemian Rhapsody', 'Under Pressure', + 'Somebody to Love', 'Tie Your Mother Down'], + names) + + def test_enable_tasks_should_enable_disabled_periodic_tasks(self): + """ + enable_tasks action should enable selected periodic tasks + """ + PeriodicTask.objects.create(name='Killer Queen', task='killer_queen', + interval=self.interval, enabled=False), + queryset = PeriodicTask.objects.filter(pk=1) + last_update = PeriodicTasks.objects.get(ident=1).last_update + self.pt_admin.enable_tasks(request, queryset) + new_last_update = PeriodicTasks.objects.get(ident=1).last_update + self.assertTrue(PeriodicTask.objects.get(pk=1).enabled) + self.assertNotEqual(last_update, new_last_update) + + def test_disable_tasks_should_disable_enabled_periodic_tasks(self): + """ + disable_tasks action should disable selected periodic tasks + """ + PeriodicTask.objects.create(name='Killer Queen', task='killer_queen', + interval=self.interval, enabled=True), + queryset = PeriodicTask.objects.filter(pk=1) + self.pt_admin.disable_tasks(request, queryset) + self.assertFalse(PeriodicTask.objects.get(pk=1).enabled) + + def test_for_valid_search_fields(self): + """ + Valid search fields should be ('name', 'task') + """ + search_fields = self.pt_admin.search_fields + self.assertEqual(search_fields, ('name', 'task')) + + for fieldname in search_fields: + query = '%s__icontains' % fieldname + kwargs = {query: 'Queen'} + # We have no content, so the number of results if we search on + # something should be zero. 
+ self.assertEquals(PeriodicTask.objects.filter(**kwargs).count(), 0) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_backends/__init__.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_backends/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_backends/test_cache.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_backends/test_cache.py new file mode 100644 index 0000000..7bda97d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_backends/test_cache.py @@ -0,0 +1,115 @@ +from __future__ import absolute_import, unicode_literals + +import sys + +from datetime import timedelta + +from billiard.einfo import ExceptionInfo + +from celery import result +from celery import states +from celery.utils import gen_unique_id + +from djcelery.app import app +from djcelery.backends.cache import CacheBackend +from djcelery.tests.utils import unittest + + +class SomeClass(object): + + def __init__(self, data): + self.data = data + + +class test_CacheBackend(unittest.TestCase): + + def test_mark_as_done(self): + cb = CacheBackend(app=app) + + tid = gen_unique_id() + + self.assertEqual(cb.get_status(tid), states.PENDING) + self.assertIsNone(cb.get_result(tid)) + + cb.mark_as_done(tid, 42) + self.assertEqual(cb.get_status(tid), states.SUCCESS) + self.assertEqual(cb.get_result(tid), 42) + self.assertTrue(cb.get_result(tid), 42) + + def test_forget(self): + b = CacheBackend(app=app) + tid = gen_unique_id() + b.mark_as_done(tid, {'foo': 'bar'}) + self.assertEqual(b.get_result(tid).get('foo'), 'bar') + b.forget(tid) + self.assertNotIn(tid, b._cache) + self.assertIsNone(b.get_result(tid)) + + def test_save_restore_delete_group(self): + backend = CacheBackend(app=app) + group_id = gen_unique_id() + subtask_ids = [gen_unique_id() for i in range(10)] + subtasks = list(map(result.AsyncResult, subtask_ids)) + res = result.GroupResult(group_id, subtasks) + res.save(backend=backend) + saved = result.GroupResult.restore(group_id, backend=backend) + self.assertListEqual(saved.subtasks, subtasks) + self.assertEqual(saved.id, group_id) + saved.delete(backend=backend) + self.assertIsNone(result.GroupResult.restore(group_id, + backend=backend)) + + def test_is_pickled(self): + cb = CacheBackend(app=app) + + tid2 = gen_unique_id() + result = {'foo': 'baz', 'bar': SomeClass(12345)} + cb.mark_as_done(tid2, result) + # is serialized properly. 
+ rindb = cb.get_result(tid2) + self.assertEqual(rindb.get('foo'), 'baz') + self.assertEqual(rindb.get('bar').data, 12345) + + def test_mark_as_failure(self): + cb = CacheBackend(app=app) + + einfo = None + tid3 = gen_unique_id() + try: + raise KeyError('foo') + except KeyError as exception: + einfo = ExceptionInfo(sys.exc_info()) + cb.mark_as_failure(tid3, exception, traceback=einfo.traceback) + self.assertEqual(cb.get_status(tid3), states.FAILURE) + self.assertIsInstance(cb.get_result(tid3), KeyError) + self.assertEqual(cb.get_traceback(tid3), einfo.traceback) + + def test_process_cleanup(self): + cb = CacheBackend(app=app) + cb.process_cleanup() + + def test_set_expires(self): + cb1 = CacheBackend(app=app, expires=timedelta(seconds=16)) + self.assertEqual(cb1.expires, 16) + cb2 = CacheBackend(app=app, expires=32) + self.assertEqual(cb2.expires, 32) + + +class test_custom_CacheBackend(unittest.TestCase): + + def test_custom_cache_backend(self): + from celery import current_app + prev_backend = current_app.conf.CELERY_CACHE_BACKEND + prev_module = sys.modules['djcelery.backends.cache'] + + current_app.conf.CELERY_CACHE_BACKEND = 'dummy' + sys.modules.pop('djcelery.backends.cache') + try: + from djcelery.backends.cache import cache + from django.core.cache import cache as django_cache + self.assertEqual(cache.__class__.__module__, + 'django.core.cache.backends.dummy') + self.assertIsNot(cache, django_cache) + finally: + current_app.conf.CELERY_CACHE_BACKEND = prev_backend + sys.modules['djcelery.backends.cache'] = prev_module diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_backends/test_database.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_backends/test_database.py new file mode 100644 index 0000000..cf2591b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_backends/test_database.py @@ -0,0 +1,105 @@ +from __future__ import absolute_import, unicode_literals + +import celery + +from datetime import timedelta + +from celery import current_app +from celery import states +from celery.result import AsyncResult +from celery.task import PeriodicTask +from celery.utils import gen_unique_id + +from djcelery.app import app +from djcelery.backends.database import DatabaseBackend +from djcelery.utils import now +from djcelery.tests.utils import unittest + + +class SomeClass(object): + + def __init__(self, data): + self.data = data + + +class MyPeriodicTask(PeriodicTask): + name = 'c.u.my-periodic-task-244' + run_every = timedelta(seconds=1) + + def run(self, **kwargs): + return 42 + + +class TestDatabaseBackend(unittest.TestCase): + + def test_backend(self): + b = DatabaseBackend(app=app) + tid = gen_unique_id() + + self.assertEqual(b.get_status(tid), states.PENDING) + self.assertIsNone(b.get_result(tid)) + + b.mark_as_done(tid, 42) + self.assertEqual(b.get_status(tid), states.SUCCESS) + self.assertEqual(b.get_result(tid), 42) + + tid2 = gen_unique_id() + result = {'foo': 'baz', 'bar': SomeClass(12345)} + b.mark_as_done(tid2, result) + # is serialized properly. 
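# (The whole result dict, including the SomeClass instance, is pickled
#  into the TaskMeta row and unpickled again by get_result().)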
+ rindb = b.get_result(tid2) + self.assertEqual(rindb.get('foo'), 'baz') + self.assertEqual(rindb.get('bar').data, 12345) + + tid3 = gen_unique_id() + try: + raise KeyError('foo') + except KeyError as exception: + b.mark_as_failure(tid3, exception) + + self.assertEqual(b.get_status(tid3), states.FAILURE) + self.assertIsInstance(b.get_result(tid3), KeyError) + + def test_forget(self): + b = DatabaseBackend(app=app) + tid = gen_unique_id() + b.mark_as_done(tid, {'foo': 'bar'}) + x = AsyncResult(tid) + self.assertEqual(x.result.get('foo'), 'bar') + x.forget() + if celery.VERSION[0:3] == (3, 1, 10): + # bug in 3.1.10 means result did not clear cache after forget. + x._cache = None + self.assertIsNone(x.result) + + def test_group_store(self): + b = DatabaseBackend(app=app) + tid = gen_unique_id() + + self.assertIsNone(b.restore_group(tid)) + + result = {'foo': 'baz', 'bar': SomeClass(12345)} + b.save_group(tid, result) + rindb = b.restore_group(tid) + self.assertIsNotNone(rindb) + self.assertEqual(rindb.get('foo'), 'baz') + self.assertEqual(rindb.get('bar').data, 12345) + b.delete_group(tid) + self.assertIsNone(b.restore_group(tid)) + + def test_cleanup(self): + b = DatabaseBackend(app=app) + b.TaskModel._default_manager.all().delete() + ids = [gen_unique_id() for _ in range(3)] + for i, res in enumerate((16, 32, 64)): + b.mark_as_done(ids[i], res) + + self.assertEqual(b.TaskModel._default_manager.count(), 3) + + then = now() - current_app.conf.CELERY_TASK_RESULT_EXPIRES * 2 + # Have to avoid save() because it applies the auto_now=True. + b.TaskModel._default_manager.filter(task_id__in=ids[:-1]) \ + .update(date_done=then) + + b.cleanup() + self.assertEqual(b.TaskModel._default_manager.count(), 1) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_commands.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_commands.py new file mode 100644 index 0000000..2be01f9 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_commands.py @@ -0,0 +1,32 @@ +# coding: utf-8 +from django import VERSION +from django.core.management import execute_from_command_line + +from ._compat import patch + + +CELERYD_COMMAND = 'djcelery.management.commands.celeryd.Command.handle' + + +def test_celeryd_command(): + if VERSION >= (1, 10): + traceback = False + else: + traceback = None + with patch(CELERYD_COMMAND) as handle: + execute_from_command_line(['manage.py', 'celeryd', '--hostname=test', + '--loglevel=info']) + handle.assert_called_with( + autoreload=None, autoscale=None, beat=None, broker=None, + concurrency=0, detach=None, exclude_queues=[], executable=None, + gid=None, heartbeat_interval=None, hostname="test", include=[], + logfile=None, loglevel='info', max_tasks_per_child=None, + no_color=False, no_execv=False, optimization=None, pidfile=None, + pool_cls='prefork', purge=False, pythonpath=None, queues=[], + quiet=None, schedule_filename='celerybeat-schedule', + scheduler_cls=None, send_events=False, settings=None, + state_db=None, task_soft_time_limit=None, + task_time_limit=None, traceback=traceback, uid=None, umask=None, + verbosity=1, without_gossip=False, without_heartbeat=False, + without_mingle=False, working_directory=None + ) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_discovery.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_discovery.py new file mode 100644 index 0000000..22ebb4e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_discovery.py @@ -0,0 +1,35 @@ +from __future__ 
import absolute_import, unicode_literals + +import warnings + +from django.conf import settings + +from celery.registry import tasks + +from djcelery.loaders import autodiscover +from djcelery.tests.utils import unittest + + +class TestDiscovery(unittest.TestCase): + + def assertDiscovery(self): + apps = autodiscover() + self.assertTrue(apps) + self.assertIn('c.unittest.SomeAppTask', tasks) + self.assertEqual(tasks['c.unittest.SomeAppTask'].run(), 42) + + def test_discovery(self): + if 'someapp' in settings.INSTALLED_APPS: + self.assertDiscovery() + + def test_discovery_with_broken(self): + warnings.resetwarnings() + if 'someapp' in settings.INSTALLED_APPS: + installed_apps = list(settings.INSTALLED_APPS) + settings.INSTALLED_APPS = installed_apps + ['xxxnot.aexist'] + try: + with warnings.catch_warnings(record=True) as log: + autodiscover() + self.assertTrue(log) + finally: + settings.INSTALLED_APPS = installed_apps diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_loaders.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_loaders.py new file mode 100644 index 0000000..2170e9b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_loaders.py @@ -0,0 +1,45 @@ +from __future__ import absolute_import, unicode_literals + +from celery import loaders + +from djcelery import loaders as djloaders +from djcelery.app import app +from djcelery.tests.utils import unittest + + +class TestDjangoLoader(unittest.TestCase): + + def setUp(self): + self.loader = djloaders.DjangoLoader(app=app) + + def test_get_loader_cls(self): + + self.assertEqual(loaders.get_loader_cls('django'), + self.loader.__class__) + # Execute cached branch. + self.assertEqual(loaders.get_loader_cls('django'), + self.loader.__class__) + + def test_on_worker_init(self): + from django.conf import settings + old_imports = getattr(settings, 'CELERY_IMPORTS', ()) + settings.CELERY_IMPORTS = ('xxx.does.not.exist', ) + try: + self.assertRaises(ImportError, self.loader.import_default_modules) + finally: + settings.CELERY_IMPORTS = old_imports + + def test_race_protection(self): + djloaders._RACE_PROTECTION = True + try: + self.assertFalse(self.loader.on_worker_init()) + finally: + djloaders._RACE_PROTECTION = False + + def test_find_related_module_no_path(self): + self.assertFalse(djloaders.find_related_module('sys', 'tasks')) + + def test_find_related_module_no_related(self): + self.assertFalse( + djloaders.find_related_module('someapp', 'frobulators'), + ) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_models.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_models.py new file mode 100644 index 0000000..9ee2575 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_models.py @@ -0,0 +1,102 @@ +from __future__ import absolute_import, unicode_literals + +from datetime import datetime, timedelta + +from celery import states +from celery.utils import gen_unique_id + +from djcelery import celery +from djcelery.models import TaskMeta, TaskSetMeta +from djcelery.utils import now +from djcelery.tests.utils import unittest +from djcelery.compat import unicode + + +class TestModels(unittest.TestCase): + + def createTaskMeta(self): + id = gen_unique_id() + taskmeta, created = TaskMeta.objects.get_or_create(task_id=id) + return taskmeta + + def createTaskSetMeta(self): + id = gen_unique_id() + tasksetmeta, created = TaskSetMeta.objects.get_or_create(taskset_id=id) + return tasksetmeta + + def test_taskmeta(self): + m1 = 
self.createTaskMeta() + m2 = self.createTaskMeta() + m3 = self.createTaskMeta() + self.assertTrue(unicode(m1).startswith(''.format(self) + + +class MyRetryTaskError(MyError): + pass + + +task_is_successful = partial(reversestar, 'celery-is_task_successful') +task_status = partial(reversestar, 'celery-task_status') +task_apply = partial(reverse, 'celery-apply') +registered_tasks = partial(reverse, 'celery-tasks') +scratch = {} + + +@task() +def mytask(x, y): + ret = scratch['result'] = int(x) * int(y) + return ret + + +def create_exception(name, base=Exception): + return type(name, (base, ), {}) + + +def catch_exception(exception): + try: + raise exception + except exception.__class__ as exc: + exc = current_app.backend.prepare_exception(exc) + return exc, ExceptionInfo(sys.exc_info()).traceback + + +class ViewTestCase(DjangoTestCase): + + def assertJSONEqual(self, json, py): + json = isinstance(json, HttpResponse) and json.content or json + try: + self.assertEqual(deserialize(json.decode('utf-8')), py) + except TypeError as exc: + raise TypeError('{0}: {1}'.format(exc, json)) + + def assertIn(self, expected, source, *args): + try: + DjangoTestCase.assertIn(self, expected, source, *args) + except AttributeError: + self.assertTrue(expected in source) + + def assertDictContainsSubset(self, subset, dictionary, *args): + for key, value in subset.items(): + self.assertIn(key, dictionary) + self.assertEqual(dictionary[key], value) + + +class test_task_apply(ViewTestCase): + + def test_apply(self): + current_app.conf.CELERY_ALWAYS_EAGER = True + try: + self.client.get( + task_apply(kwargs={'task_name': mytask.name}) + '?x=4&y=4', + ) + self.assertEqual(scratch['result'], 16) + finally: + current_app.conf.CELERY_ALWAYS_EAGER = False + + def test_apply_raises_404_on_unregistered_task(self): + current_app.conf.CELERY_ALWAYS_EAGER = True + try: + name = 'xxx.does.not.exist' + action = partial( + self.client.get, + task_apply(kwargs={'task_name': name}) + '?x=4&y=4', + ) + try: + res = action() + except TemplateDoesNotExist: + pass # pre Django 1.5 + else: + self.assertEqual(res.status_code, 404) + finally: + current_app.conf.CELERY_ALWAYS_EAGER = False + + +class test_registered_tasks(ViewTestCase): + + def test_list_registered_tasks(self): + json = self.client.get(registered_tasks()) + tasks = deserialize(json.content.decode('utf-8')) + self.assertIn('celery.backend_cleanup', tasks['regular']) + + +class test_webhook_task(ViewTestCase): + + def test_successful_request(self): + + @task_webhook + def add_webhook(request): + x = int(request.GET['x']) + y = int(request.GET['y']) + return x + y + + request = MockRequest().get('/tasks/add', dict(x=10, y=10)) + response = add_webhook(request) + self.assertDictContainsSubset( + {'status': 'success', 'retval': 20}, + deserialize(response.content.decode('utf-8'))) + + def test_failed_request(self): + + @task_webhook + def error_webhook(request): + x = int(request.GET['x']) + y = int(request.GET['y']) + raise MyError(x + y) + + request = MockRequest().get('/tasks/error', dict(x=10, y=10)) + response = error_webhook(request) + self.assertDictContainsSubset( + {'status': 'failure', + 'reason': ''}, + deserialize(response.content.decode('utf-8'))) + + +class test_task_status(ViewTestCase): + + def assertStatusForIs(self, status, res, traceback=None): + uuid = gen_unique_id() + current_app.backend.store_result(uuid, res, status, + traceback=traceback) + json = self.client.get(task_status(task_id=uuid)) + expect = dict(id=uuid, status=status, result=res) + if status 
in current_app.backend.EXCEPTION_STATES: + instore = current_app.backend.get_result(uuid) + self.assertEqual(str(instore.args[0]), str(res.args[0])) + expect['result'] = repr(res) + expect['exc'] = get_full_cls_name(res.__class__) + expect['traceback'] = traceback + + self.assertJSONEqual(json, dict(task=expect)) + + def test_success(self): + self.assertStatusForIs(states.SUCCESS, 'The quick brown fox') + + def test_failure(self): + exc, tb = catch_exception(MyError('foo')) + self.assertStatusForIs(states.FAILURE, exc, tb) + + def test_retry(self): + oexc, _ = catch_exception(MyError('Resource not available')) + exc, tb = catch_exception(MyRetryTaskError(str(oexc), oexc)) + self.assertStatusForIs(states.RETRY, exc, tb) + + +class test_task_is_successful(ViewTestCase): + + def assertStatusForIs(self, status, outcome, result=None): + uuid = gen_unique_id() + result = result or gen_unique_id() + current_app.backend.store_result(uuid, result, status) + json = self.client.get(task_is_successful(task_id=uuid)) + self.assertJSONEqual(json, {'task': {'id': uuid, + 'executed': outcome}}) + + def test_success(self): + self.assertStatusForIs(states.SUCCESS, True) + + def test_pending(self): + self.assertStatusForIs(states.PENDING, False) + + def test_failure(self): + self.assertStatusForIs(states.FAILURE, False, KeyError('foo')) + + def test_retry(self): + self.assertStatusForIs(states.RETRY, False, KeyError('foo')) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_worker_job.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_worker_job.py new file mode 100644 index 0000000..c625f5a --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/tests/test_worker_job.py @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import, unicode_literals + +from django.core import cache + +from celery.utils import gen_unique_id +from celery.task import task as task_dec + +from celery.tests.worker.test_request import jail + +from djcelery.app import app +from djcelery.tests.utils import unittest + + +@task_dec() +def mytask(i): + return i ** i + + +@task_dec() +def get_db_connection(i): + from django.db import connection + return id(connection) + + +get_db_connection.ignore_result = True + + +class TestJail(unittest.TestCase): + + def test_django_db_connection_is_closed(self): + from django.db import connection + connection._was_closed = False + old_connection_close = connection.close + + def monkeypatched_connection_close(*args, **kwargs): + connection._was_closed = True + return old_connection_close(*args, **kwargs) + + connection.close = monkeypatched_connection_close + try: + jail(app, gen_unique_id(), get_db_connection.name, [2], {}) + self.assertTrue(connection._was_closed) + finally: + connection.close = old_connection_close + + def test_django_cache_connection_is_closed(self): + old_cache_close = getattr(cache.cache, 'close', None) + cache._was_closed = False + old_cache_parse_backend = getattr(cache, 'parse_backend_uri', None) + if old_cache_parse_backend: # checks to make sure attr exists + delattr(cache, 'parse_backend_uri') + + def monkeypatched_cache_close(*args, **kwargs): + cache._was_closed = True + + cache.cache.close = monkeypatched_cache_close + + jail(app, gen_unique_id(), mytask.name, [4], {}) + self.assertTrue(cache._was_closed) + cache.cache.close = old_cache_close + if old_cache_parse_backend: + cache.parse_backend_uri = old_cache_parse_backend + + def test_django_cache_connection_is_closed_django_1_1(self): + old_cache_close = 
getattr(cache.cache, 'close', None) + cache._was_closed = False + old_cache_parse_backend = getattr(cache, 'parse_backend_uri', None) + cache.parse_backend_uri = lambda uri: ['libmemcached', '1', '2'] + + def monkeypatched_cache_close(*args, **kwargs): + cache._was_closed = True + + cache.cache.close = monkeypatched_cache_close + + jail(app, gen_unique_id(), mytask.name, [4], {}) + self.assertTrue(cache._was_closed) + cache.cache.close = old_cache_close + if old_cache_parse_backend: + cache.parse_backend_uri = old_cache_parse_backend + else: + del(cache.parse_backend_uri) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/tests/utils.py b/thesisenv/lib/python3.6/site-packages/djcelery/tests/utils.py new file mode 100644 index 0000000..1d93261 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/tests/utils.py @@ -0,0 +1,7 @@ +from __future__ import absolute_import, unicode_literals + +try: + import unittest + unittest.skip +except AttributeError: + import unittest2 as unittest # noqa diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/transport/__init__.py b/thesisenv/lib/python3.6/site-packages/djcelery/transport/__init__.py new file mode 100644 index 0000000..e4512f0 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/transport/__init__.py @@ -0,0 +1,10 @@ +""" + +This module is an alias to :mod:`kombu.transport.django` + +""" +from __future__ import absolute_import, unicode_literals + +import kombu.transport.django as transport + +__path__.extend(transport.__path__) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/urls.py b/thesisenv/lib/python3.6/site-packages/djcelery/urls.py new file mode 100644 index 0000000..079eb51 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/urls.py @@ -0,0 +1,40 @@ +""" + +URLs defined for celery. + +* ``/$task_id/done/`` + + URL to :func:`~celery.views.is_successful`. + +* ``/$task_id/status/`` + + URL to :func:`~celery.views.task_status`. + +""" +from __future__ import absolute_import, unicode_literals + + +from django.conf.urls import url + + +from . import views + +task_pattern = r'(?P[\w\d\-\.]+)' + +urlpatterns = [ + url( + r'^%s/done/?$' % task_pattern, + views.is_task_successful, + name='celery-is_task_successful' + ), + url( + r'^%s/status/?$' % task_pattern, + views.task_status, + name='celery-task_status' + ), + url( + r'^tasks/?$', + views.registered_tasks, + name='celery-tasks' + ), +] diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/utils.py b/thesisenv/lib/python3.6/site-packages/djcelery/utils.py new file mode 100644 index 0000000..71ec83a --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/utils.py @@ -0,0 +1,92 @@ +# -- XXX This module must not use translation as that causes +# -- a recursive loader import! +from __future__ import absolute_import, unicode_literals + +from datetime import datetime + +from django.conf import settings +from django.utils import timezone + +# Database-related exceptions. 
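The URL patterns declared in djcelery/urls.py above can be mounted from a project URLconf; a minimal sketch follows, in which the ``celery/`` prefix is an arbitrary choice for illustration::

    from django.conf.urls import include, url

    urlpatterns = [
        # Exposes /celery/<task_id>/done/, /celery/<task_id>/status/ and
        # /celery/tasks/, matching the routes defined in djcelery.urls.
        url(r'^celery/', include('djcelery.urls')),
    ]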
+from django.db import DatabaseError +try: + import MySQLdb as mysql + _my_database_errors = (mysql.DatabaseError, + mysql.InterfaceError, + mysql.OperationalError) +except ImportError: + _my_database_errors = () # noqa +try: + import psycopg2 as pg + _pg_database_errors = (pg.DatabaseError, + pg.InterfaceError, + pg.OperationalError) +except ImportError: + _pg_database_errors = () # noqa +try: + import sqlite3 + _lite_database_errors = (sqlite3.DatabaseError, + sqlite3.InterfaceError, + sqlite3.OperationalError) +except ImportError: + _lite_database_errors = () # noqa +try: + import cx_Oracle as oracle + _oracle_database_errors = (oracle.DatabaseError, + oracle.InterfaceError, + oracle.OperationalError) +except ImportError: + _oracle_database_errors = () # noqa + +DATABASE_ERRORS = ((DatabaseError, ) + + _my_database_errors + + _pg_database_errors + + _lite_database_errors + + _oracle_database_errors) + + +def make_aware(value): + if settings.USE_TZ: + # naive datetimes are assumed to be in UTC. + if timezone.is_naive(value): + value = timezone.make_aware(value, timezone.utc) + # then convert to the Django configured timezone. + default_tz = timezone.get_default_timezone() + value = timezone.localtime(value, default_tz) + return value + + +def make_naive(value): + if settings.USE_TZ: + default_tz = timezone.get_default_timezone() + value = timezone.make_naive(value, default_tz) + return value + + +def now(): + return make_aware(timezone.now()) + + +def correct_awareness(value): + if isinstance(value, datetime): + if settings.USE_TZ: + return make_aware(value) + elif timezone.is_aware(value): + default_tz = timezone.get_default_timezone() + return timezone.make_naive(value, default_tz) + return value + + +def is_database_scheduler(scheduler): + if not scheduler: + return False + from kombu.utils import symbol_by_name + from .schedulers import DatabaseScheduler + return issubclass(symbol_by_name(scheduler), DatabaseScheduler) + + +def fromtimestamp(value): + if settings.USE_TZ: + return make_aware(datetime.utcfromtimestamp(value)) + else: + return datetime.fromtimestamp(value) diff --git a/thesisenv/lib/python3.6/site-packages/djcelery/views.py b/thesisenv/lib/python3.6/site-packages/djcelery/views.py new file mode 100644 index 0000000..47f9654 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/djcelery/views.py @@ -0,0 +1,125 @@ +from __future__ import absolute_import, unicode_literals + +from functools import wraps + +from django.http import HttpResponse, Http404 + +from anyjson import serialize + +from celery import states +from celery.five import keys, items +from celery.registry import tasks +from celery.result import AsyncResult +from celery.utils import get_full_cls_name +from celery.utils.encoding import safe_repr + +# Ensure built-in tasks are loaded for task_list view +import celery.task # noqa + + +def JsonResponse(response): + return HttpResponse(serialize(response), content_type='application/json') + + +def task_view(task): + """Decorator turning any task into a view that applies the task + asynchronously. Keyword arguments (via URLconf, etc.) will + supercede GET or POST parameters when there are conflicts. + + Returns a JSON dictionary containing the keys ``ok``, and + ``task_id``. 
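A minimal usage sketch for the timezone helpers above, assuming a Django settings module is already configured; every helper's behaviour hinges on ``settings.USE_TZ``::

    from datetime import datetime
    from djcelery.utils import correct_awareness, make_aware, make_naive

    naive = datetime(2018, 10, 22, 18, 0)

    aware = make_aware(naive)         # aware in the default zone when USE_TZ=True,
                                      # returned unchanged when USE_TZ=False
    back = make_naive(aware)          # strips tzinfo again when USE_TZ=True
    fixed = correct_awareness(naive)  # dispatches to make_aware()/make_naive()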
+ + """ + + def _applier(request, **options): + kwargs = request.POST if request.method == 'POST' else request.GET + # no multivalue + kwargs = {k: v for k, v in items(kwargs)} + if options: + kwargs.update(options) + result = task.apply_async(kwargs=kwargs) + return JsonResponse({'ok': 'true', 'task_id': result.task_id}) + + return _applier + + +def apply(request, task_name): + """View applying a task. + + **Note:** Please use this with caution. Preferably you shouldn't make this + publicly accessible without ensuring your code is safe! + + """ + try: + task = tasks[task_name] + except KeyError: + raise Http404('apply: no such task') + return task_view(task)(request) + + +def is_task_successful(request, task_id): + """Returns task execute status in JSON format.""" + return JsonResponse({'task': { + 'id': task_id, + 'executed': AsyncResult(task_id).successful(), + }}) + + +def task_status(request, task_id): + """Returns task status and result in JSON format.""" + result = AsyncResult(task_id) + state, retval = result.state, result.result + response_data = {'id': task_id, 'status': state, 'result': retval} + if state in states.EXCEPTION_STATES: + traceback = result.traceback + response_data.update({'result': safe_repr(retval), + 'exc': get_full_cls_name(retval.__class__), + 'traceback': traceback}) + return JsonResponse({'task': response_data}) + + +def registered_tasks(request): + """View returning all defined tasks as a JSON object.""" + return JsonResponse({'regular': list(keys(tasks)), 'periodic': ''}) + + +def task_webhook(fun): + """Decorator turning a function into a task webhook. + + If an exception is raised within the function, the decorated + function catches this and returns an error JSON response, otherwise + it returns the result as a JSON response. + + + Example: + + .. code-block:: python + + @task_webhook + def add(request): + x = int(request.GET['x']) + y = int(request.GET['y']) + return x + y + + def view(request): + response = add(request) + print(response.content) + + Gives:: + + "{'status': 'success', 'retval': 100}" + + """ + + @wraps(fun) + def _inner(*args, **kwargs): + try: + retval = fun(*args, **kwargs) + except Exception as exc: + response = {'status': 'failure', 'reason': safe_repr(exc)} + else: + response = {'status': 'success', 'retval': retval} + + return JsonResponse(response) + + return _inner diff --git a/thesisenv/lib/python3.6/site-packages/funtests/__init__.py b/thesisenv/lib/python3.6/site-packages/funtests/__init__.py new file mode 100644 index 0000000..1bea488 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/funtests/__init__.py @@ -0,0 +1,5 @@ +import os +import sys + +sys.path.insert(0, os.pardir) +sys.path.insert(0, os.getcwd()) diff --git a/thesisenv/lib/python3.6/site-packages/funtests/setup.py b/thesisenv/lib/python3.6/site-packages/funtests/setup.py new file mode 100644 index 0000000..a82a603 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/funtests/setup.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +try: + from setuptools import setup + from setuptools.command.install import install +except ImportError: + from ez_setup import use_setuptools + use_setuptools() + from setuptools import setup # noqa + from setuptools.command.install import install # noqa + + +class no_install(install): + + def run(self, *args, **kwargs): + import sys + sys.stderr.write(""" +------------------------------------------------------- +The billiard functional test suite cannot be installed. 
+------------------------------------------------------- + + +But you can execute the tests by running the command: + + $ python setup.py test + + +""") + + +setup( + name='billiard-funtests', + version='DEV', + description='Functional test suite for billiard', + author='Ask Solem', + author_email='ask@celeryproject.org', + url='http://github.com/celery/billiard', + platforms=['any'], + packages=[], + data_files=[], + zip_safe=False, + cmdclass={'install': no_install}, + test_suite='nose.collector', + build_requires=[ + 'nose', + 'unittest2', + 'coverage>=3.0', + ], + classifiers=[ + 'Operating System :: OS Independent', + 'Programming Language :: Python', + 'Programming Language :: C' + 'License :: OSI Approved :: BSD License', + 'Intended Audience :: Developers', + ], + long_description='Do not install this package', +) diff --git a/thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/DESCRIPTION.rst b/thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000..5a0a262 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/DESCRIPTION.rst @@ -0,0 +1,332 @@ +.. _kombu-index: + +======================================== + kombu - Messaging library for Python +======================================== + +:Version: 3.0.37 + +`Kombu` is a messaging library for Python. + +The aim of `Kombu` is to make messaging in Python as easy as possible by +providing an idiomatic high-level interface for the AMQ protocol, and also +provide proven and tested solutions to common messaging problems. + +`AMQP`_ is the Advanced Message Queuing Protocol, an open standard protocol +for message orientation, queuing, routing, reliability and security, +for which the `RabbitMQ`_ messaging server is the most popular implementation. + +Features +======== + +* Allows application authors to support several message server + solutions by using pluggable transports. + + * AMQP transport using the `py-amqp`_, `librabbitmq`_, or `qpid-python`_ client libraries. + + * High performance AMQP transport written in C - when using `librabbitmq`_ + + This is automatically enabled if librabbitmq is installed:: + + $ pip install librabbitmq + + * Virtual transports makes it really easy to add support for non-AMQP + transports. There is already built-in support for `Redis`_, + `Beanstalk`_, `Amazon SQS`_, `CouchDB`_, `MongoDB`_, `ZeroMQ`_, + `ZooKeeper`_, `SoftLayer MQ`_ and `Pyro`_. + + * You can also use the SQLAlchemy and Django ORM transports to + use a database as the broker. + + * In-memory transport for unit testing. + +* Supports automatic encoding, serialization and compression of message + payloads. + +* Consistent exception handling across transports. + +* The ability to ensure that an operation is performed by gracefully + handling connection and channel errors. + +* Several annoyances with `amqplib`_ has been fixed, like supporting + timeouts and the ability to wait for events on more than one channel. + +* Projects already using `carrot`_ can easily be ported by using + a compatibility layer. + +For an introduction to AMQP you should read the article `Rabbits and warrens`_, +and the `Wikipedia article about AMQP`_. + +.. _`RabbitMQ`: http://www.rabbitmq.com/ +.. _`AMQP`: http://amqp.org +.. _`py-amqp`: http://pypi.python.org/pypi/amqp/ +.. _`qpid-python`: http://pypi.python.org/pypi/qpid-python/ +.. _`Redis`: http://code.google.com/p/redis/ +.. _`Amazon SQS`: http://aws.amazon.com/sqs/ +.. _`MongoDB`: http://www.mongodb.org/ +.. 
_`CouchDB`: http://couchdb.apache.org/ +.. _`ZeroMQ`: http://zeromq.org/ +.. _`Zookeeper`: https://zookeeper.apache.org/ +.. _`Beanstalk`: http://kr.github.com/beanstalkd/ +.. _`Rabbits and warrens`: http://blogs.digitar.com/jjww/2009/01/rabbits-and-warrens/ +.. _`amqplib`: http://barryp.org/software/py-amqplib/ +.. _`Wikipedia article about AMQP`: http://en.wikipedia.org/wiki/AMQP +.. _`carrot`: http://pypi.python.org/pypi/carrot/ +.. _`librabbitmq`: http://pypi.python.org/pypi/librabbitmq +.. _`Pyro`: http://pythonhosting.org/Pyro +.. _`SoftLayer MQ`: http://www.softlayer.com/services/additional/message-queue + + +.. _transport-comparison: + +Transport Comparison +==================== + ++---------------+----------+------------+------------+---------------+ +| **Client** | **Type** | **Direct** | **Topic** | **Fanout** | ++---------------+----------+------------+------------+---------------+ +| *amqp* | Native | Yes | Yes | Yes | ++---------------+----------+------------+------------+---------------+ +| *qpid* | Native | Yes | Yes | Yes | ++---------------+----------+------------+------------+---------------+ +| *redis* | Virtual | Yes | Yes | Yes (PUB/SUB) | ++---------------+----------+------------+------------+---------------+ +| *mongodb* | Virtual | Yes | Yes | Yes | ++---------------+----------+------------+------------+---------------+ +| *beanstalk* | Virtual | Yes | Yes [#f1]_ | No | ++---------------+----------+------------+------------+---------------+ +| *SQS* | Virtual | Yes | Yes [#f1]_ | Yes [#f2]_ | ++---------------+----------+------------+------------+---------------+ +| *couchdb* | Virtual | Yes | Yes [#f1]_ | No | ++---------------+----------+------------+------------+---------------+ +| *zookeeper* | Virtual | Yes | Yes [#f1]_ | No | ++---------------+----------+------------+------------+---------------+ +| *in-memory* | Virtual | Yes | Yes [#f1]_ | No | ++---------------+----------+------------+------------+---------------+ +| *django* | Virtual | Yes | Yes [#f1]_ | No | ++---------------+----------+------------+------------+---------------+ +| *sqlalchemy* | Virtual | Yes | Yes [#f1]_ | No | ++---------------+----------+------------+------------+---------------+ +| *SLMQ* | Virtual | Yes | Yes [#f1]_ | No | ++---------------+----------+------------+------------+---------------+ + + +.. [#f1] Declarations only kept in memory, so exchanges/queues + must be declared by all clients that needs them. + +.. [#f2] Fanout supported via storing routing tables in SimpleDB. + Disabled by default, but can be enabled by using the + ``supports_fanout`` transport option. + + +Documentation +------------- + +Kombu is using Sphinx, and the latest documentation can be found here: + + https://kombu.readthedocs.io/ + +Quick overview +-------------- + +:: + + from kombu import Connection, Exchange, Queue + + media_exchange = Exchange('media', 'direct', durable=True) + video_queue = Queue('video', exchange=media_exchange, routing_key='video') + + def process_media(body, message): + print body + message.ack() + + # connections + with Connection('amqp://guest:guest@localhost//') as conn: + + # produce + producer = conn.Producer(serializer='json') + producer.publish({'name': '/tmp/lolcat1.avi', 'size': 1301013}, + exchange=media_exchange, routing_key='video', + declare=[video_queue]) + + # the declare above, makes sure the video queue is declared + # so that the messages can be delivered. + # It's a best practice in Kombu to have both publishers and + # consumers declare the queue. 
You can also declare the + # queue manually using: + # video_queue(conn).declare() + + # consume + with conn.Consumer(video_queue, callbacks=[process_media]) as consumer: + # Process messages and handle events on all channels + while True: + conn.drain_events() + + # Consume from several queues on the same channel: + video_queue = Queue('video', exchange=media_exchange, key='video') + image_queue = Queue('image', exchange=media_exchange, key='image') + + with connection.Consumer([video_queue, image_queue], + callbacks=[process_media]) as consumer: + while True: + connection.drain_events() + + +Or handle channels manually:: + + with connection.channel() as channel: + producer = Producer(channel, ...) + consumer = Producer(channel) + + +All objects can be used outside of with statements too, +just remember to close the objects after use:: + + from kombu import Connection, Consumer, Producer + + connection = Connection() + # ... + connection.release() + + consumer = Consumer(channel_or_connection, ...) + consumer.register_callback(my_callback) + consumer.consume() + # .... + consumer.cancel() + + +`Exchange` and `Queue` are simply declarations that can be pickled +and used in configuration files etc. + +They also support operations, but to do so they need to be bound +to a channel. + +Binding exchanges and queues to a connection will make it use +that connections default channel. + +:: + + >>> exchange = Exchange('tasks', 'direct') + + >>> connection = Connection() + >>> bound_exchange = exchange(connection) + >>> bound_exchange.delete() + + # the original exchange is not affected, and stays unbound. + >>> exchange.delete() + raise NotBoundError: Can't call delete on Exchange not bound to + a channel. + +Installation +============ + +You can install `Kombu` either via the Python Package Index (PyPI) +or from source. + +To install using `pip`,:: + + $ pip install kombu + +To install using `easy_install`,:: + + $ easy_install kombu + +If you have downloaded a source tarball you can install it +by doing the following,:: + + $ python setup.py build + # python setup.py install # as root + + +Terminology +=========== + +There are some concepts you should be familiar with before starting: + + * Producers + + Producers sends messages to an exchange. + + * Exchanges + + Messages are sent to exchanges. Exchanges are named and can be + configured to use one of several routing algorithms. The exchange + routes the messages to consumers by matching the routing key in the + message with the routing key the consumer provides when binding to + the exchange. + + * Consumers + + Consumers declares a queue, binds it to a exchange and receives + messages from it. + + * Queues + + Queues receive messages sent to exchanges. The queues are declared + by consumers. + + * Routing keys + + Every message has a routing key. The interpretation of the routing + key depends on the exchange type. There are four default exchange + types defined by the AMQP standard, and vendors can define custom + types (so see your vendors manual for details). + + These are the default exchange types defined by AMQP/0.8: + + * Direct exchange + + Matches if the routing key property of the message and + the `routing_key` attribute of the consumer are identical. + + * Fan-out exchange + + Always matches, even if the binding does not have a routing + key. + + * Topic exchange + + Matches the routing key property of the message by a primitive + pattern matching scheme. 
The message routing key then consists + of words separated by dots (`"."`, like domain names), and + two special characters are available; star (`"*"`) and hash + (`"#"`). The star matches any word, and the hash matches + zero or more words. For example `"*.stock.#"` matches the + routing keys `"usd.stock"` and `"eur.stock.db"` but not + `"stock.nasdaq"`. + +Getting Help +============ + +Mailing list +------------ + +Join the `carrot-users`_ mailing list. + +.. _`carrot-users`: http://groups.google.com/group/carrot-users/ + +Bug tracker +=========== + +If you have any suggestions, bug reports or annoyances please report them +to our issue tracker at http://github.com/celery/kombu/issues/ + +Contributing +============ + +Development of `Kombu` happens at Github: http://github.com/celery/kombu + +You are highly encouraged to participate in the development. If you don't +like Github (for some reason) you're welcome to send regular patches. + +License +======= + +This software is licensed under the `New BSD License`. See the `LICENSE` +file in the top distribution directory for the full license text. + +.. image:: https://d2weczhvl823v0.cloudfront.net/celery/kombu/trend.png + :alt: Bitdeli badge + :target: https://bitdeli.com/free + + diff --git a/thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/INSTALLER b/thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/METADATA b/thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/METADATA new file mode 100644 index 0000000..d073a86 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/METADATA @@ -0,0 +1,364 @@ +Metadata-Version: 2.0 +Name: kombu +Version: 3.0.37 +Summary: Messaging library for Python +Home-page: https://kombu.readthedocs.io +Author: Ask Solem +Author-email: ask@celeryproject.org +License: UNKNOWN +Platform: any +Classifier: Development Status :: 5 - Production/Stable +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Python :: Implementation :: Jython +Classifier: Intended Audience :: Developers +Classifier: Topic :: Communications +Classifier: Topic :: System :: Distributed Computing +Classifier: Topic :: System :: Networking +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Requires-Dist: anyjson >= 0.3.3 +Requires-Dist: amqp >= 1.4.9,<2.0 +Requires-Dist: importlib; python_version == "2.6" +Requires-Dist: ordereddict; python_version == "2.6" + +.. _kombu-index: + +======================================== + kombu - Messaging library for Python +======================================== + +:Version: 3.0.37 + +`Kombu` is a messaging library for Python. 
+ +The aim of `Kombu` is to make messaging in Python as easy as possible by +providing an idiomatic high-level interface for the AMQ protocol, and also +provide proven and tested solutions to common messaging problems. + +`AMQP`_ is the Advanced Message Queuing Protocol, an open standard protocol +for message orientation, queuing, routing, reliability and security, +for which the `RabbitMQ`_ messaging server is the most popular implementation. + +Features +======== + +* Allows application authors to support several message server + solutions by using pluggable transports. + + * AMQP transport using the `py-amqp`_, `librabbitmq`_, or `qpid-python`_ client libraries. + + * High performance AMQP transport written in C - when using `librabbitmq`_ + + This is automatically enabled if librabbitmq is installed:: + + $ pip install librabbitmq + + * Virtual transports makes it really easy to add support for non-AMQP + transports. There is already built-in support for `Redis`_, + `Beanstalk`_, `Amazon SQS`_, `CouchDB`_, `MongoDB`_, `ZeroMQ`_, + `ZooKeeper`_, `SoftLayer MQ`_ and `Pyro`_. + + * You can also use the SQLAlchemy and Django ORM transports to + use a database as the broker. + + * In-memory transport for unit testing. + +* Supports automatic encoding, serialization and compression of message + payloads. + +* Consistent exception handling across transports. + +* The ability to ensure that an operation is performed by gracefully + handling connection and channel errors. + +* Several annoyances with `amqplib`_ has been fixed, like supporting + timeouts and the ability to wait for events on more than one channel. + +* Projects already using `carrot`_ can easily be ported by using + a compatibility layer. + +For an introduction to AMQP you should read the article `Rabbits and warrens`_, +and the `Wikipedia article about AMQP`_. + +.. _`RabbitMQ`: http://www.rabbitmq.com/ +.. _`AMQP`: http://amqp.org +.. _`py-amqp`: http://pypi.python.org/pypi/amqp/ +.. _`qpid-python`: http://pypi.python.org/pypi/qpid-python/ +.. _`Redis`: http://code.google.com/p/redis/ +.. _`Amazon SQS`: http://aws.amazon.com/sqs/ +.. _`MongoDB`: http://www.mongodb.org/ +.. _`CouchDB`: http://couchdb.apache.org/ +.. _`ZeroMQ`: http://zeromq.org/ +.. _`Zookeeper`: https://zookeeper.apache.org/ +.. _`Beanstalk`: http://kr.github.com/beanstalkd/ +.. _`Rabbits and warrens`: http://blogs.digitar.com/jjww/2009/01/rabbits-and-warrens/ +.. _`amqplib`: http://barryp.org/software/py-amqplib/ +.. _`Wikipedia article about AMQP`: http://en.wikipedia.org/wiki/AMQP +.. _`carrot`: http://pypi.python.org/pypi/carrot/ +.. _`librabbitmq`: http://pypi.python.org/pypi/librabbitmq +.. _`Pyro`: http://pythonhosting.org/Pyro +.. _`SoftLayer MQ`: http://www.softlayer.com/services/additional/message-queue + + +.. 
_transport-comparison: + +Transport Comparison +==================== + ++---------------+----------+------------+------------+---------------+ +| **Client** | **Type** | **Direct** | **Topic** | **Fanout** | ++---------------+----------+------------+------------+---------------+ +| *amqp* | Native | Yes | Yes | Yes | ++---------------+----------+------------+------------+---------------+ +| *qpid* | Native | Yes | Yes | Yes | ++---------------+----------+------------+------------+---------------+ +| *redis* | Virtual | Yes | Yes | Yes (PUB/SUB) | ++---------------+----------+------------+------------+---------------+ +| *mongodb* | Virtual | Yes | Yes | Yes | ++---------------+----------+------------+------------+---------------+ +| *beanstalk* | Virtual | Yes | Yes [#f1]_ | No | ++---------------+----------+------------+------------+---------------+ +| *SQS* | Virtual | Yes | Yes [#f1]_ | Yes [#f2]_ | ++---------------+----------+------------+------------+---------------+ +| *couchdb* | Virtual | Yes | Yes [#f1]_ | No | ++---------------+----------+------------+------------+---------------+ +| *zookeeper* | Virtual | Yes | Yes [#f1]_ | No | ++---------------+----------+------------+------------+---------------+ +| *in-memory* | Virtual | Yes | Yes [#f1]_ | No | ++---------------+----------+------------+------------+---------------+ +| *django* | Virtual | Yes | Yes [#f1]_ | No | ++---------------+----------+------------+------------+---------------+ +| *sqlalchemy* | Virtual | Yes | Yes [#f1]_ | No | ++---------------+----------+------------+------------+---------------+ +| *SLMQ* | Virtual | Yes | Yes [#f1]_ | No | ++---------------+----------+------------+------------+---------------+ + + +.. [#f1] Declarations only kept in memory, so exchanges/queues + must be declared by all clients that needs them. + +.. [#f2] Fanout supported via storing routing tables in SimpleDB. + Disabled by default, but can be enabled by using the + ``supports_fanout`` transport option. + + +Documentation +------------- + +Kombu is using Sphinx, and the latest documentation can be found here: + + https://kombu.readthedocs.io/ + +Quick overview +-------------- + +:: + + from kombu import Connection, Exchange, Queue + + media_exchange = Exchange('media', 'direct', durable=True) + video_queue = Queue('video', exchange=media_exchange, routing_key='video') + + def process_media(body, message): + print body + message.ack() + + # connections + with Connection('amqp://guest:guest@localhost//') as conn: + + # produce + producer = conn.Producer(serializer='json') + producer.publish({'name': '/tmp/lolcat1.avi', 'size': 1301013}, + exchange=media_exchange, routing_key='video', + declare=[video_queue]) + + # the declare above, makes sure the video queue is declared + # so that the messages can be delivered. + # It's a best practice in Kombu to have both publishers and + # consumers declare the queue. 
You can also declare the + # queue manually using: + # video_queue(conn).declare() + + # consume + with conn.Consumer(video_queue, callbacks=[process_media]) as consumer: + # Process messages and handle events on all channels + while True: + conn.drain_events() + + # Consume from several queues on the same channel: + video_queue = Queue('video', exchange=media_exchange, key='video') + image_queue = Queue('image', exchange=media_exchange, key='image') + + with connection.Consumer([video_queue, image_queue], + callbacks=[process_media]) as consumer: + while True: + connection.drain_events() + + +Or handle channels manually:: + + with connection.channel() as channel: + producer = Producer(channel, ...) + consumer = Producer(channel) + + +All objects can be used outside of with statements too, +just remember to close the objects after use:: + + from kombu import Connection, Consumer, Producer + + connection = Connection() + # ... + connection.release() + + consumer = Consumer(channel_or_connection, ...) + consumer.register_callback(my_callback) + consumer.consume() + # .... + consumer.cancel() + + +`Exchange` and `Queue` are simply declarations that can be pickled +and used in configuration files etc. + +They also support operations, but to do so they need to be bound +to a channel. + +Binding exchanges and queues to a connection will make it use +that connections default channel. + +:: + + >>> exchange = Exchange('tasks', 'direct') + + >>> connection = Connection() + >>> bound_exchange = exchange(connection) + >>> bound_exchange.delete() + + # the original exchange is not affected, and stays unbound. + >>> exchange.delete() + raise NotBoundError: Can't call delete on Exchange not bound to + a channel. + +Installation +============ + +You can install `Kombu` either via the Python Package Index (PyPI) +or from source. + +To install using `pip`,:: + + $ pip install kombu + +To install using `easy_install`,:: + + $ easy_install kombu + +If you have downloaded a source tarball you can install it +by doing the following,:: + + $ python setup.py build + # python setup.py install # as root + + +Terminology +=========== + +There are some concepts you should be familiar with before starting: + + * Producers + + Producers sends messages to an exchange. + + * Exchanges + + Messages are sent to exchanges. Exchanges are named and can be + configured to use one of several routing algorithms. The exchange + routes the messages to consumers by matching the routing key in the + message with the routing key the consumer provides when binding to + the exchange. + + * Consumers + + Consumers declares a queue, binds it to a exchange and receives + messages from it. + + * Queues + + Queues receive messages sent to exchanges. The queues are declared + by consumers. + + * Routing keys + + Every message has a routing key. The interpretation of the routing + key depends on the exchange type. There are four default exchange + types defined by the AMQP standard, and vendors can define custom + types (so see your vendors manual for details). + + These are the default exchange types defined by AMQP/0.8: + + * Direct exchange + + Matches if the routing key property of the message and + the `routing_key` attribute of the consumer are identical. + + * Fan-out exchange + + Always matches, even if the binding does not have a routing + key. + + * Topic exchange + + Matches the routing key property of the message by a primitive + pattern matching scheme. 
The message routing key then consists + of words separated by dots (`"."`, like domain names), and + two special characters are available; star (`"*"`) and hash + (`"#"`). The star matches any word, and the hash matches + zero or more words. For example `"*.stock.#"` matches the + routing keys `"usd.stock"` and `"eur.stock.db"` but not + `"stock.nasdaq"`. + +Getting Help +============ + +Mailing list +------------ + +Join the `carrot-users`_ mailing list. + +.. _`carrot-users`: http://groups.google.com/group/carrot-users/ + +Bug tracker +=========== + +If you have any suggestions, bug reports or annoyances please report them +to our issue tracker at http://github.com/celery/kombu/issues/ + +Contributing +============ + +Development of `Kombu` happens at Github: http://github.com/celery/kombu + +You are highly encouraged to participate in the development. If you don't +like Github (for some reason) you're welcome to send regular patches. + +License +======= + +This software is licensed under the `New BSD License`. See the `LICENSE` +file in the top distribution directory for the full license text. + +.. image:: https://d2weczhvl823v0.cloudfront.net/celery/kombu/trend.png + :alt: Bitdeli badge + :target: https://bitdeli.com/free + + diff --git a/thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/RECORD b/thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/RECORD new file mode 100644 index 0000000..f6822dc --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/RECORD @@ -0,0 +1,225 @@ +kombu/__init__.py,sha256=4Uo-AOGmmH9Qqf-ZsE_YQyh-1U8wDhbAgZTIHWfW6IQ,3835 +kombu/abstract.py,sha256=CyaVymSaUmh3XyC5PQUDthZ3blRamXl_YGHCDng8eNM,3340 +kombu/clocks.py,sha256=CrecSHrpWQZwMU7195Kwl8xUMp0nH7YITPqD45dz0kU,4530 +kombu/common.py,sha256=3Cpt0qxSEjDWr3QQfCHPuTF8h9eoXdme-jxEKdwswlE,12250 +kombu/compat.py,sha256=JYfjJvLnoOlK1n4OqYkSdAvKGCKdDstts0bEIFjS77Q,6563 +kombu/compression.py,sha256=-qxOpcxuW_DPpajSDW3nqqoF4Muv0ej_CvYEGYaK4LE,1986 +kombu/connection.py,sha256=y-McoGjBU_svVyVsIlIvAdvu0gaczugG-UjZryIWTSI,39109 +kombu/entity.py,sha256=s4092Z8eoSfxUDCsXSlzN878hpdT7uYxAu9MvyKO3p0,27112 +kombu/exceptions.py,sha256=WOqPvdzEF465Ymg_Vw_YTQgprCSS1saoKcrQ8l7tVMk,1860 +kombu/five.py,sha256=-PSdzhGz5DJ2wXg-ZhwIkiw3lAhh8S4_DtLFokXQn9k,5873 +kombu/log.py,sha256=nSXBiViapaGJ6FU35O1KW6vKzTyOjE9WMPNEm2BDe5I,4168 +kombu/message.py,sha256=CllqNnMruIbeGl2uycXgtkqjm6JvU3AdI82t-H1V20E,5113 +kombu/messaging.py,sha256=dwp7L9Jjp-mQP3wIV-PRD1gmovVA9AnzDlvlvcemsgQ,21846 +kombu/mixins.py,sha256=_ifLa6bSdpR49pu9katFRwSoMYlGa8TpSNoAvcBrV2E,8087 +kombu/pidbox.py,sha256=NRIphgR-PPj3WRNuUFtwJsUETOtT9KVrJ5dneybMoYw,12704 +kombu/pools.py,sha256=AcKzrzAPaHuaaguUyQngN66Kf4L5PqtbOx5KGRcSFzE,3812 +kombu/serialization.py,sha256=K_-LkesEyp9waknMhXmHf7MmuObd4hQDpVYrbCq74OE,14866 +kombu/simple.py,sha256=hrDmKlbOaXl0fbou40mZEUX8Y-9gRrw4NzsFDHXcvyo,4113 +kombu/syn.py,sha256=fHEIVL_SvufPViQSqtS8i5cJInqjzp-nmtDZbg3zvVY,1001 +kombu/async/__init__.py,sha256=0qauaJIVtrVIMQWDQ_BTnemvMJ_FypWO-ytwFDLI16Y,307 +kombu/async/debug.py,sha256=4CeWg8Z9woR4cZz_jSO2l-KnDFq0cOwuhGKFDMXPVIE,1484 +kombu/async/hub.py,sha256=VbblpAOJ3KRshwrreHlsx_WEA5e_Xlc791uCk3Y8xQs,10743 +kombu/async/semaphore.py,sha256=47WzHsBlMfzGwMTibeYfEUjwlCVXD5yF0GA3Zyc3ctI,2890 +kombu/async/timer.py,sha256=8Uh0ooiLdw4XU5aqpIZd8wAYdU5APZz8UraqJgp8Unc,6546 +kombu/tests/__init__.py,sha256=5Yy9-kQKeWW0yEw8eAMfIj8038d4Z5j3jHeqMfvSKDE,2646 +kombu/tests/case.py,sha256=1zXsc0Fi86SFkffwcZ6SdL2goO9XW0vYc8hi8POPlpE,5508 
+kombu/tests/mocks.py,sha256=RFz5ZWXNDme64xHb4_vCTfNWxLPBQaR1pjNxvkrwzpw,4238 +kombu/tests/test_clocks.py,sha256=ZjruvEqaizN5txnNdfYtvsfaru08BUo19RUqIvCW68E,2660 +kombu/tests/test_common.py,sha256=jR_1BJjBiVszpVM98g061Uky7gEtNcBPWSAeN2egw7U,13870 +kombu/tests/test_compat.py,sha256=yIpOR_54eo1IQ1HHVLcsS4AwTlMm-tRC9EWgybsyUCw,11870 +kombu/tests/test_compression.py,sha256=DUnmAi-zfE21mwPqKC31V1CGPFjm-0E3ISu-SN02TZw,1508 +kombu/tests/test_connection.py,sha256=Pn5jGG4q5gaLr1u5oVpEICRCUnTocL44W6CapGgDRpQ,22558 +kombu/tests/test_entities.py,sha256=5g5W1Wm5O_VChLblklzSD5XfZ8CC7RHMT5LflrCaGkg,12046 +kombu/tests/test_log.py,sha256=hJMvtKBl_1vStyLywMfdIB9u7skC62SbI4MOI4J-UVU,5035 +kombu/tests/test_messaging.py,sha256=f730QgFPywW-mRLWiasiXTAoNjJHSNNe0IleJQFpx3I,23343 +kombu/tests/test_mixins.py,sha256=dnW4U-LuB24RW6ErwBPImzpbKF8Mdz_RzXJe2wRQvz0,8064 +kombu/tests/test_pidbox.py,sha256=nYoeMmz1rodXCVMZkCnfFpCBGQgPas8pq-cSx48MVws,9755 +kombu/tests/test_pools.py,sha256=B0S8oqtjUeow4mketeFIXpKp-2GXvtEOCXRoTNsa0FQ,7369 +kombu/tests/test_serialization.py,sha256=x6HsYeb2rzzI-PDH5j1GzW5yCm0VlIBduRV24Req6Qc,11657 +kombu/tests/test_simple.py,sha256=J7I7Y5C3ep3-gCVL4nfCk_qvWYgGpoO5i8lhcwQg3oo,3772 +kombu/tests/test_syn.py,sha256=CWvWoM8Sc59IPbxluLwM5FBJZLgKNKOrGo1_j9t-jVI,2044 +kombu/tests/async/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +kombu/tests/async/test_hub.py,sha256=CULTPO-o9_c1yD3HTy0H-LDTJiGhevpcQWF5M01eTz8,751 +kombu/tests/async/test_semaphore.py,sha256=VIis1Llkmh8pd_uTDM07YCoGG8Gd_wyM8BhAHkmvKSg,1250 +kombu/tests/transport/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +kombu/tests/transport/test_SQS.py,sha256=uggaWY9f17yy4_irgpeySpj9YkbueDzDd4BKmyG3p_8,9912 +kombu/tests/transport/test_amqplib.py,sha256=9LgFnVWz49VXYvREsTxt2p6xeMrG9Wn5ZLEDiTJNVrM,4704 +kombu/tests/transport/test_base.py,sha256=WchjrjxQn6ESllQk8btTM7CoJiB2_ICH64TuZsffkDA,4922 +kombu/tests/transport/test_filesystem.py,sha256=GdQC4Wl2M4olrN4hhTByaNAJBgA00VkKR6UP5s2YcQE,4288 +kombu/tests/transport/test_librabbitmq.py,sha256=jod0WKLpdTXNk5ZviK0q94Fehp_EGsaHp5Ue2iO78A8,4882 +kombu/tests/transport/test_memory.py,sha256=i1RirsEKqrF-VsJUefl93oDZ3pwL0ftSVcRDBKOp3YA,4918 +kombu/tests/transport/test_mongodb.py,sha256=lsi73ld5jVlk__eUlVtmwbK_NB14Nl4-GwzDVHVf_NI,3944 +kombu/tests/transport/test_pyamqp.py,sha256=KXhO9jEZdckscacjTmVOJcgdu5fhBTfDU4EIPQlOgGY,5435 +kombu/tests/transport/test_qpid.py,sha256=JFLeZW6psm8Rf5zHb0m3oGWOAVwOkiJSfZF6qngganY,72523 +kombu/tests/transport/test_redis.py,sha256=p8u5sSLlxo7vbtrkL9wQTD01V-_3VxtVr1GK63WA-3Y,40804 +kombu/tests/transport/test_sqlalchemy.py,sha256=Cl4YGtmjeLv6LuvziAO8N9vQWLO6_8iVYPp_nRGQ-jY,2249 +kombu/tests/transport/test_transport.py,sha256=fLpb9jmSlcntlNFxZX3UmED2As3InVMAaQaf6ZY7Ryg,1341 +kombu/tests/transport/virtual/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +kombu/tests/transport/virtual/test_base.py,sha256=dkVGI7iy79VK9tQcnvJ2uhcGhaoE4U3azd4ql9_e2oU,17753 +kombu/tests/transport/virtual/test_exchange.py,sha256=WSFNMLmp8XzHgZ-61Eh3cTyrdWELDdaQN-xgVD2qLhc,4859 +kombu/tests/transport/virtual/test_scheduling.py,sha256=or_y__Y4B6buIu9hF3GOVXtiLmqvL1_JXs8JscrVv6s,1835 +kombu/tests/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +kombu/tests/utils/test_amq_manager.py,sha256=S6RkDELUCAIgjhq--CcL0gOgRknFCuJqUoJqRi9w6RE,1238 +kombu/tests/utils/test_debug.py,sha256=1ZrSSqvjh1ApWQWRH8GqFLmztHjF-3Na7FmfYs8AEFg,1726 +kombu/tests/utils/test_encoding.py,sha256=KcbiAeBwl6i9H3vTXTa42vizpBFBI1XdTIU7kq6tBYs,3034 
+kombu/tests/utils/test_functional.py,sha256=D-yp4s-whQNLfiorcbK7NBBqdrhhwBw2I2W6KWQrVhk,1652 +kombu/tests/utils/test_utils.py,sha256=uRR_WgTPErohDwNXP3mUDbj5dQE05ZPWi09x0gjITLE,11114 +kombu/transport/SLMQ.py,sha256=PHLWHrn0eFKNw3IxuqbLnqGryH0Ndm_JXvw398ZKKn4,6028 +kombu/transport/SQS.py,sha256=tAd7LC2bZh36eSICN0jNpnv8o2nCizFfH6Nj6chzSek,18661 +kombu/transport/__init__.py,sha256=ei-rvEKqQIuZq1jdOeHBCyJiCI_5U7owLUQI9iNruRg,3713 +kombu/transport/amqplib.py,sha256=3N6HemxbuWHyjyNkYenUpH70Jqt3mC4lnWUelrkee10,13345 +kombu/transport/base.py,sha256=Ah-TVNBhTrpLkdmN3KSdIDJKk0b0wSg7rACj5aiC5LQ,4698 +kombu/transport/beanstalk.py,sha256=2kxDPCDv9_E3oa-8HOnzf4s6Yoj38yfF7ns95MMgLkg,4050 +kombu/transport/couchdb.py,sha256=E-uY055z9f_Nb33i9GlkjMtkXTjxHRLstHC7V8xJydk,3992 +kombu/transport/filesystem.py,sha256=hzoDYySrOMHu7q11jAfahACjE6hntz_dm7_SrjseLb4,5533 +kombu/transport/librabbitmq.py,sha256=xHRPfA9C4AKDXaLz3QIeF1Ua-TBmLWAwb-t7LF_EWWo,5450 +kombu/transport/memory.py,sha256=qx5ja7aOwM0LcFBvGcYHJI2sz7zewqJ0i_BlkBGrkec,1784 +kombu/transport/mongodb.py,sha256=X6qYxLoI3sPkykioPiCiPxjOlkfYxGcCNnIYDedUhjk,10146 +kombu/transport/pyamqp.py,sha256=OB1aFgqmOuyqgNkqAlvfKhky8X10PL8SrF4zoQGgVqM,5008 +kombu/transport/pyro.py,sha256=rxzZ69VTu9PqNqjvlR3obHNjsGmiMA_Jglqi5ozLreM,2468 +kombu/transport/qpid.py,sha256=UcYjuLoqFsFFo8XdSqqOshsDI0Laq8I0xwDOPM34_24,71570 +kombu/transport/redis.py,sha256=ijVAW6mapqk6zvGJLkLGCVKK4U-ZGCS_9m_YpH13B14,35961 +kombu/transport/zmq.py,sha256=w3LSH-5ldvYkJ7THbxon8EHoZj0CvQbB0LyPZkUZc5M,8635 +kombu/transport/zookeeper.py,sha256=HnX1fgyq4HmiGPiQzcb99kHoALCT9Qsb2h9WgtqmEcY,5232 +kombu/transport/django/__init__.py,sha256=J7Q3O5J8IbcGE-KWUj0qzq-fD4P9JBxnxC5T6gp0KKE,2376 +kombu/transport/django/managers.py,sha256=IwI6utNi0xtjyYr6XDc9umhRnDBO7kbvLyV5EdXbHKA,2489 +kombu/transport/django/models.py,sha256=196P-lR_po4qj8XtrIV9MC0WeEL0kvf378zEXr6Tv30,1125 +kombu/transport/django/management/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +kombu/transport/django/management/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +kombu/transport/django/management/commands/clean_kombu_messages.py,sha256=JFN3X5W2VBXmlmOy8CFO9tW6W9v9myhkJFPAAVEIJO4,564 +kombu/transport/django/migrations/0001_initial.py,sha256=N1Cesbz1K3d-LPd-22OqMlwKnxq0oHLdOmv54QuPqEc,1688 +kombu/transport/django/migrations/__init__.py,sha256=nAs3tXW0M0-MrurJHjtCa31LxGpJyiqvHwiJn4cz8yM,477 +kombu/transport/django/south_migrations/0001_initial.py,sha256=q6m1OVi3RPnfQoz8XEti-2U6H3eBidRGko8nXFwAWGg,2435 +kombu/transport/django/south_migrations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +kombu/transport/sqlalchemy/__init__.py,sha256=Xv7bR-Vou0ClW6wYFKBMFvYIZjXz2zGrrVnCcuTB9sY,5186 +kombu/transport/sqlalchemy/models.py,sha256=IGoY-77N8DVeNTcDnf3WpRl5PANIYdLeqhV4doBVS-4,1961 +kombu/transport/virtual/__init__.py,sha256=Skny6J8rd3hADewgKjeys86k4xY7vFeSgUFGkXp8P9g,27656 +kombu/transport/virtual/exchange.py,sha256=SqvRaRZ0CiHnc_lfTY-H4riUy1yR-URSUhsJMIoM_CM,4580 +kombu/transport/virtual/scheduling.py,sha256=Oy4pw9Rv_A0oO4Z7kZ7llIHV0xQtza0mEkmmzbC_De0,1293 +kombu/utils/__init__.py,sha256=GqZ0PryBma5B__9Qs0O-fwab2r81yt7IsnFpXo_3i3g,12781 +kombu/utils/amq_manager.py,sha256=s6hHwoFFbNozyX027I4Da6PeSBWpr-x1y4CXguXpYp4,655 +kombu/utils/compat.py,sha256=tsvzlK5NNeQfGo8G0mX1QFFimOZdoYo5MA7tvzE2Kqg,1553 +kombu/utils/debug.py,sha256=X1jmxUe561akqMlAj4gSGaUEFl9pv_oi06hXixutxAc,1668 +kombu/utils/encoding.py,sha256=lWT0Rggy-rKsTLOoUHsOq9QD8j6qWZvRxIBMMtpZTBA,3085 
+kombu/utils/eventio.py,sha256=Y3uIXt1RNVJSVelMbfPiSDC5YTSHSp8tUj4MlhzPlyo,7867 +kombu/utils/functional.py,sha256=6yt_fqyspa5Qi_7JMUb2VW1cj12Sz8D6piB71fUv-cI,2069 +kombu/utils/limits.py,sha256=BOKJhU55TSr-z02q_R3MPapm16t-jdwQaPlMpphQA0A,2195 +kombu/utils/text.py,sha256=UboB5LotK2uO_fJkEn_o2faZaIFjeXCTVP93hP8UGf4,1383 +kombu/utils/url.py,sha256=XklOtY5w3rP_IZaIs4V2AseaH1IyZVa7IDvco_Wo0UE,2088 +kombu-3.0.37.dist-info/DESCRIPTION.rst,sha256=QVM1W90ZxpN9Y8_g2Fx0MxgfvOBaTJAyYlkFDnHpX68,11395 +kombu-3.0.37.dist-info/METADATA,sha256=OEf6AwLL2psD-LshHTXHQVNZputk0AtdNKfXX_Kpedw,12730 +kombu-3.0.37.dist-info/RECORD,, +kombu-3.0.37.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110 +kombu-3.0.37.dist-info/metadata.json,sha256=jx9N6lWEQATDPIuADa9XLKMX30WB743MCIFYF-UHkvQ,1461 +kombu-3.0.37.dist-info/top_level.txt,sha256=uoTZ9rdRBzLZu_Hnt_3txqi3DQMIaLwFr3LXJ8HT0G4,6 +kombu-3.0.37.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +kombu/transport/__pycache__/qpid.cpython-36.pyc,, +kombu/transport/__pycache__/zookeeper.cpython-36.pyc,, +kombu/transport/__pycache__/filesystem.cpython-36.pyc,, +kombu/transport/__pycache__/beanstalk.cpython-36.pyc,, +kombu/transport/__pycache__/pyamqp.cpython-36.pyc,, +kombu/transport/__pycache__/zmq.cpython-36.pyc,, +kombu/transport/__pycache__/librabbitmq.cpython-36.pyc,, +kombu/transport/__pycache__/amqplib.cpython-36.pyc,, +kombu/transport/__pycache__/pyro.cpython-36.pyc,, +kombu/transport/__pycache__/base.cpython-36.pyc,, +kombu/transport/__pycache__/memory.cpython-36.pyc,, +kombu/transport/__pycache__/couchdb.cpython-36.pyc,, +kombu/transport/__pycache__/mongodb.cpython-36.pyc,, +kombu/transport/__pycache__/SLMQ.cpython-36.pyc,, +kombu/transport/__pycache__/__init__.cpython-36.pyc,, +kombu/transport/__pycache__/redis.cpython-36.pyc,, +kombu/transport/__pycache__/SQS.cpython-36.pyc,, +kombu/transport/sqlalchemy/__pycache__/models.cpython-36.pyc,, +kombu/transport/sqlalchemy/__pycache__/__init__.cpython-36.pyc,, +kombu/transport/virtual/__pycache__/exchange.cpython-36.pyc,, +kombu/transport/virtual/__pycache__/scheduling.cpython-36.pyc,, +kombu/transport/virtual/__pycache__/__init__.cpython-36.pyc,, +kombu/transport/django/migrations/__pycache__/0001_initial.cpython-36.pyc,, +kombu/transport/django/migrations/__pycache__/__init__.cpython-36.pyc,, +kombu/transport/django/management/__pycache__/__init__.cpython-36.pyc,, +kombu/transport/django/management/commands/__pycache__/clean_kombu_messages.cpython-36.pyc,, +kombu/transport/django/management/commands/__pycache__/__init__.cpython-36.pyc,, +kombu/transport/django/__pycache__/managers.cpython-36.pyc,, +kombu/transport/django/__pycache__/models.cpython-36.pyc,, +kombu/transport/django/__pycache__/__init__.cpython-36.pyc,, +kombu/transport/django/south_migrations/__pycache__/0001_initial.cpython-36.pyc,, +kombu/transport/django/south_migrations/__pycache__/__init__.cpython-36.pyc,, +kombu/async/__pycache__/debug.cpython-36.pyc,, +kombu/async/__pycache__/timer.cpython-36.pyc,, +kombu/async/__pycache__/semaphore.cpython-36.pyc,, +kombu/async/__pycache__/hub.cpython-36.pyc,, +kombu/async/__pycache__/__init__.cpython-36.pyc,, +kombu/tests/transport/__pycache__/test_librabbitmq.cpython-36.pyc,, +kombu/tests/transport/__pycache__/test_base.cpython-36.pyc,, +kombu/tests/transport/__pycache__/test_SQS.cpython-36.pyc,, +kombu/tests/transport/__pycache__/test_redis.cpython-36.pyc,, +kombu/tests/transport/__pycache__/test_memory.cpython-36.pyc,, 
+kombu/tests/transport/__pycache__/test_transport.cpython-36.pyc,, +kombu/tests/transport/__pycache__/test_sqlalchemy.cpython-36.pyc,, +kombu/tests/transport/__pycache__/test_mongodb.cpython-36.pyc,, +kombu/tests/transport/__pycache__/test_pyamqp.cpython-36.pyc,, +kombu/tests/transport/__pycache__/test_qpid.cpython-36.pyc,, +kombu/tests/transport/__pycache__/test_amqplib.cpython-36.pyc,, +kombu/tests/transport/__pycache__/__init__.cpython-36.pyc,, +kombu/tests/transport/__pycache__/test_filesystem.cpython-36.pyc,, +kombu/tests/transport/virtual/__pycache__/test_exchange.cpython-36.pyc,, +kombu/tests/transport/virtual/__pycache__/test_scheduling.cpython-36.pyc,, +kombu/tests/transport/virtual/__pycache__/test_base.cpython-36.pyc,, +kombu/tests/transport/virtual/__pycache__/__init__.cpython-36.pyc,, +kombu/tests/async/__pycache__/test_semaphore.cpython-36.pyc,, +kombu/tests/async/__pycache__/test_hub.cpython-36.pyc,, +kombu/tests/async/__pycache__/__init__.cpython-36.pyc,, +kombu/tests/utils/__pycache__/test_utils.cpython-36.pyc,, +kombu/tests/utils/__pycache__/test_amq_manager.cpython-36.pyc,, +kombu/tests/utils/__pycache__/test_debug.cpython-36.pyc,, +kombu/tests/utils/__pycache__/test_encoding.cpython-36.pyc,, +kombu/tests/utils/__pycache__/__init__.cpython-36.pyc,, +kombu/tests/utils/__pycache__/test_functional.cpython-36.pyc,, +kombu/tests/__pycache__/mocks.cpython-36.pyc,, +kombu/tests/__pycache__/test_connection.cpython-36.pyc,, +kombu/tests/__pycache__/test_compression.cpython-36.pyc,, +kombu/tests/__pycache__/test_log.cpython-36.pyc,, +kombu/tests/__pycache__/test_entities.cpython-36.pyc,, +kombu/tests/__pycache__/test_serialization.cpython-36.pyc,, +kombu/tests/__pycache__/test_common.cpython-36.pyc,, +kombu/tests/__pycache__/test_simple.cpython-36.pyc,, +kombu/tests/__pycache__/test_compat.cpython-36.pyc,, +kombu/tests/__pycache__/test_pidbox.cpython-36.pyc,, +kombu/tests/__pycache__/test_mixins.cpython-36.pyc,, +kombu/tests/__pycache__/test_syn.cpython-36.pyc,, +kombu/tests/__pycache__/case.cpython-36.pyc,, +kombu/tests/__pycache__/test_messaging.cpython-36.pyc,, +kombu/tests/__pycache__/test_pools.cpython-36.pyc,, +kombu/tests/__pycache__/test_clocks.cpython-36.pyc,, +kombu/tests/__pycache__/__init__.cpython-36.pyc,, +kombu/utils/__pycache__/debug.cpython-36.pyc,, +kombu/utils/__pycache__/amq_manager.cpython-36.pyc,, +kombu/utils/__pycache__/eventio.cpython-36.pyc,, +kombu/utils/__pycache__/url.cpython-36.pyc,, +kombu/utils/__pycache__/functional.cpython-36.pyc,, +kombu/utils/__pycache__/text.cpython-36.pyc,, +kombu/utils/__pycache__/encoding.cpython-36.pyc,, +kombu/utils/__pycache__/compat.cpython-36.pyc,, +kombu/utils/__pycache__/limits.cpython-36.pyc,, +kombu/utils/__pycache__/__init__.cpython-36.pyc,, +kombu/__pycache__/abstract.cpython-36.pyc,, +kombu/__pycache__/clocks.cpython-36.pyc,, +kombu/__pycache__/syn.cpython-36.pyc,, +kombu/__pycache__/entity.cpython-36.pyc,, +kombu/__pycache__/exceptions.cpython-36.pyc,, +kombu/__pycache__/pools.cpython-36.pyc,, +kombu/__pycache__/compression.cpython-36.pyc,, +kombu/__pycache__/simple.cpython-36.pyc,, +kombu/__pycache__/message.cpython-36.pyc,, +kombu/__pycache__/common.cpython-36.pyc,, +kombu/__pycache__/messaging.cpython-36.pyc,, +kombu/__pycache__/pidbox.cpython-36.pyc,, +kombu/__pycache__/mixins.cpython-36.pyc,, +kombu/__pycache__/compat.cpython-36.pyc,, +kombu/__pycache__/connection.cpython-36.pyc,, +kombu/__pycache__/five.cpython-36.pyc,, +kombu/__pycache__/log.cpython-36.pyc,, 
+kombu/__pycache__/serialization.cpython-36.pyc,, +kombu/__pycache__/__init__.cpython-36.pyc,, diff --git a/thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/WHEEL b/thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/WHEEL new file mode 100644 index 0000000..8b6dd1b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.29.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/metadata.json b/thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/metadata.json new file mode 100644 index 0000000..c529292 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["Development Status :: 5 - Production/Stable", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Programming Language :: Python :: Implementation :: Jython", "Intended Audience :: Developers", "Topic :: Communications", "Topic :: System :: Distributed Computing", "Topic :: System :: Networking", "Topic :: Software Development :: Libraries :: Python Modules"], "extensions": {"python.details": {"contacts": [{"email": "ask@celeryproject.org", "name": "Ask Solem", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://kombu.readthedocs.io"}}}, "extras": [], "generator": "bdist_wheel (0.29.0)", "metadata_version": "2.0", "name": "kombu", "platform": "any", "run_requires": [{"requires": ["amqp >= 1.4.9,<2.0", "anyjson >= 0.3.3"]}, {"environment": "python_version == \"2.6\"", "requires": ["importlib", "ordereddict"]}], "summary": "Messaging library for Python", "test_requires": [{"requires": ["mock", "nose", "unittest2 (>=0.5.0)"]}], "version": "3.0.37"} \ No newline at end of file diff --git a/thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/top_level.txt b/thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/top_level.txt new file mode 100644 index 0000000..3a8f969 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu-3.0.37.dist-info/top_level.txt @@ -0,0 +1 @@ +kombu diff --git a/thesisenv/lib/python3.6/site-packages/kombu/__init__.py b/thesisenv/lib/python3.6/site-packages/kombu/__init__.py new file mode 100644 index 0000000..7e53a6b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/__init__.py @@ -0,0 +1,108 @@ +"""Messaging library for Python""" +from __future__ import absolute_import + +import os +import sys + +from collections import namedtuple +from types import ModuleType + +version_info_t = namedtuple( + 'version_info_t', ('major', 'minor', 'micro', 'releaselevel', 'serial'), +) + +VERSION = version_info_t(3, 0, 37, '', '') +__version__ = '{0.major}.{0.minor}.{0.micro}{0.releaselevel}'.format(VERSION) +__author__ = 'Ask Solem' +__contact__ = 'ask@celeryproject.org' +__homepage__ = 'https://kombu.readthedocs.io' +__docformat__ = 'restructuredtext en' + +# -eof meta- + +if sys.version_info < (2, 6): # pragma: no cover + raise Exception('Kombu 
3.1 requires Python versions 2.6 or later.') + +STATICA_HACK = True +globals()['kcah_acitats'[::-1].upper()] = False +if STATICA_HACK: # pragma: no cover + # This is never executed, but tricks static analyzers (PyDev, PyCharm, + # pylint, etc.) into knowing the types of these symbols, and what + # they contain. + from kombu.connection import Connection, BrokerConnection # noqa + from kombu.entity import Exchange, Queue, binding # noqa + from kombu.messaging import Consumer, Producer # noqa + from kombu.pools import connections, producers # noqa + from kombu.utils.url import parse_url # noqa + from kombu.common import eventloop, uuid # noqa + from kombu.serialization import ( # noqa + enable_insecure_serializers, + disable_insecure_serializers, + ) + +# Lazy loading. +# - See werkzeug/__init__.py for the rationale behind this. + +all_by_module = { + 'kombu.connection': ['Connection', 'BrokerConnection'], + 'kombu.entity': ['Exchange', 'Queue', 'binding'], + 'kombu.messaging': ['Consumer', 'Producer'], + 'kombu.pools': ['connections', 'producers'], + 'kombu.utils.url': ['parse_url'], + 'kombu.common': ['eventloop', 'uuid'], + 'kombu.serialization': ['enable_insecure_serializers', + 'disable_insecure_serializers'], +} + +object_origins = {} +for module, items in all_by_module.items(): + for item in items: + object_origins[item] = module + + +class module(ModuleType): + + def __getattr__(self, name): + if name in object_origins: + module = __import__(object_origins[name], None, None, [name]) + for extra_name in all_by_module[module.__name__]: + setattr(self, extra_name, getattr(module, extra_name)) + return getattr(module, name) + return ModuleType.__getattribute__(self, name) + + def __dir__(self): + result = list(new_module.__all__) + result.extend(('__file__', '__path__', '__doc__', '__all__', + '__docformat__', '__name__', '__path__', 'VERSION', + '__package__', '__version__', '__author__', + '__contact__', '__homepage__', '__docformat__')) + return result + +# 2.5 does not define __package__ +try: + package = __package__ +except NameError: # pragma: no cover + package = 'kombu' + +# keep a reference to this module so that it's not garbage collected +old_module = sys.modules[__name__] + +new_module = sys.modules[__name__] = module(__name__) +new_module.__dict__.update({ + '__file__': __file__, + '__path__': __path__, + '__doc__': __doc__, + '__all__': tuple(object_origins), + '__version__': __version__, + '__author__': __author__, + '__contact__': __contact__, + '__homepage__': __homepage__, + '__docformat__': __docformat__, + '__package__': package, + 'version_info_t': version_info_t, + 'VERSION': VERSION}) + +if os.environ.get('KOMBU_LOG_DEBUG'): # pragma: no cover + os.environ.update(KOMBU_LOG_CHANNEL='1', KOMBU_LOG_CONNECTION='1') + from .utils import debug + debug.setup_logging() diff --git a/thesisenv/lib/python3.6/site-packages/kombu/abstract.py b/thesisenv/lib/python3.6/site-packages/kombu/abstract.py new file mode 100644 index 0000000..e55f0c5 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/abstract.py @@ -0,0 +1,119 @@ +""" +kombu.abstract +============== + +Object utilities. 
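A small sketch of what the lazy-loading module class above buys the caller: top-level attribute access is what actually triggers the submodule import::

    import kombu

    # Goes through module.__getattr__, which imports kombu.connection on
    # first use and caches every name that module provides.
    Connection = kombu.Connection

    # __dir__ exposes all lazily importable names from all_by_module.
    assert 'Exchange' in dir(kombu)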
+ +""" +from __future__ import absolute_import + +from copy import copy + +from .connection import maybe_channel +from .exceptions import NotBoundError +from .utils import ChannelPromise + +__all__ = ['Object', 'MaybeChannelBound'] + + +def unpickle_dict(cls, kwargs): + return cls(**kwargs) + + +def _any(v): + return v + + +class Object(object): + """Common base class supporting automatic kwargs->attributes handling, + and cloning.""" + attrs = () + + def __init__(self, *args, **kwargs): + for name, type_ in self.attrs: + value = kwargs.get(name) + if value is not None: + setattr(self, name, (type_ or _any)(value)) + else: + try: + getattr(self, name) + except AttributeError: + setattr(self, name, None) + + def as_dict(self, recurse=False): + def f(obj, type): + if recurse and isinstance(obj, Object): + return obj.as_dict(recurse=True) + return type(obj) if type else obj + return dict( + (attr, f(getattr(self, attr), type)) for attr, type in self.attrs + ) + + def __reduce__(self): + return unpickle_dict, (self.__class__, self.as_dict()) + + def __copy__(self): + return self.__class__(**self.as_dict()) + + +class MaybeChannelBound(Object): + """Mixin for classes that can be bound to an AMQP channel.""" + _channel = None + _is_bound = False + + #: Defines whether maybe_declare can skip declaring this entity twice. + can_cache_declaration = False + + def __call__(self, channel): + """`self(channel) -> self.bind(channel)`""" + return self.bind(channel) + + def bind(self, channel): + """Create copy of the instance that is bound to a channel.""" + return copy(self).maybe_bind(channel) + + def maybe_bind(self, channel): + """Bind instance to channel if not already bound.""" + if not self.is_bound and channel: + self._channel = maybe_channel(channel) + self.when_bound() + self._is_bound = True + return self + + def revive(self, channel): + """Revive channel after the connection has been re-established. + + Used by :meth:`~kombu.Connection.ensure`. + + """ + if self.is_bound: + self._channel = channel + self.when_bound() + + def when_bound(self): + """Callback called when the class is bound.""" + pass + + def __repr__(self, item=''): + item = item or type(self).__name__ + if self.is_bound: + return '<{0} bound to chan:{1}>'.format( + item or type(self).__name__, self.channel.channel_id) + return ''.format(item) + + @property + def is_bound(self): + """Flag set if the channel is bound.""" + return self._is_bound and self._channel is not None + + @property + def channel(self): + """Current channel if the object is bound.""" + channel = self._channel + if channel is None: + raise NotBoundError( + "Can't call method on {0} not bound to a channel".format( + type(self).__name__)) + if isinstance(channel, ChannelPromise): + channel = self._channel = channel() + return channel diff --git a/thesisenv/lib/python3.6/site-packages/kombu/async/__init__.py b/thesisenv/lib/python3.6/site-packages/kombu/async/__init__.py new file mode 100644 index 0000000..c6e8e8e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/async/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +""" +kombu.async +=========== + +Event loop implementation. 
+ +""" +from __future__ import absolute_import + +from .hub import Hub, get_event_loop, set_event_loop + +from kombu.utils.eventio import READ, WRITE, ERR + +__all__ = ['READ', 'WRITE', 'ERR', 'Hub', 'get_event_loop', 'set_event_loop'] diff --git a/thesisenv/lib/python3.6/site-packages/kombu/async/debug.py b/thesisenv/lib/python3.6/site-packages/kombu/async/debug.py new file mode 100644 index 0000000..80cdcb7 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/async/debug.py @@ -0,0 +1,60 @@ +from __future__ import absolute_import + +from kombu.five import items +from kombu.utils import reprcall +from kombu.utils.eventio import READ, WRITE, ERR + + +def repr_flag(flag): + return '{0}{1}{2}'.format('R' if flag & READ else '', + 'W' if flag & WRITE else '', + '!' if flag & ERR else '') + + +def _rcb(obj): + if obj is None: + return '' + if isinstance(obj, str): + return obj + if isinstance(obj, tuple): + cb, args = obj + return reprcall(cb.__name__, args=args) + return obj.__name__ + + +def repr_active(h): + return ', '.join(repr_readers(h) + repr_writers(h)) + + +def repr_events(h, events): + return ', '.join( + '{0}({1})->{2}'.format( + _rcb(callback_for(h, fd, fl, '(GONE)')), fd, + repr_flag(fl), + ) + for fd, fl in events + ) + + +def repr_readers(h): + return ['({0}){1}->{2}'.format(fd, _rcb(cb), repr_flag(READ | ERR)) + for fd, cb in items(h.readers)] + + +def repr_writers(h): + return ['({0}){1}->{2}'.format(fd, _rcb(cb), repr_flag(WRITE)) + for fd, cb in items(h.writers)] + + +def callback_for(h, fd, flag, *default): + try: + if flag & READ: + return h.readers[fd] + if flag & WRITE: + if fd in h.consolidate: + return h.consolidate_callback + return h.writers[fd] + except KeyError: + if default: + return default[0] + raise diff --git a/thesisenv/lib/python3.6/site-packages/kombu/async/hub.py b/thesisenv/lib/python3.6/site-packages/kombu/async/hub.py new file mode 100644 index 0000000..66067bf --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/async/hub.py @@ -0,0 +1,366 @@ +# -*- coding: utf-8 -*- +""" +kombu.async.hub +=============== + +Event loop implementation. + +""" +from __future__ import absolute_import + +import errno + +from collections import deque +from contextlib import contextmanager +from time import sleep +from types import GeneratorType as generator + +from amqp import promise + +from kombu.five import Empty, range +from kombu.log import get_logger +from kombu.utils import cached_property, fileno +from kombu.utils.compat import get_errno +from kombu.utils.eventio import READ, WRITE, ERR, poll + +from .timer import Timer + +__all__ = ['Hub', 'get_event_loop', 'set_event_loop'] +logger = get_logger(__name__) + +_current_loop = None + +W_UNKNOWN_EVENT = """\ +Received unknown event %r for fd %r, please contact support!\ +""" + + +class Stop(BaseException): + """Stops the event loop.""" + + +def _raise_stop_error(): + raise Stop() + + +@contextmanager +def _dummy_context(*args, **kwargs): + yield + + +def get_event_loop(): + return _current_loop + + +def set_event_loop(loop): + global _current_loop + _current_loop = loop + return loop + + +class Hub(object): + """Event loop object. + + :keyword timer: Specify timer object. + + """ + #: Flag set if reading from an fd will not block. + READ = READ + + #: Flag set if writing to an fd will not block. + WRITE = WRITE + + #: Flag set on error, and the fd should be read from asap. 
+ ERR = ERR + + #: List of callbacks to be called when the loop is exiting, + #: applied with the hub instance as sole argument. + on_close = None + + def __init__(self, timer=None): + self.timer = timer if timer is not None else Timer() + + self.readers = {} + self.writers = {} + self.on_tick = set() + self.on_close = set() + self._ready = deque() + + self._running = False + self._loop = None + + # The eventloop (in celery.worker.loops) + # will merge fds in this set and then instead of calling + # the callback for each ready fd it will call the + # :attr:`consolidate_callback` with the list of ready_fds + # as an argument. This API is internal and is only + # used by the multiprocessing pool to find inqueues + # that are ready to write. + self.consolidate = set() + self.consolidate_callback = None + + self.propagate_errors = () + + self._create_poller() + + def reset(self): + self.close() + self._create_poller() + + def _create_poller(self): + self.poller = poll() + self._register_fd = self.poller.register + self._unregister_fd = self.poller.unregister + + def _close_poller(self): + if self.poller is not None: + self.poller.close() + self.poller = None + self._register_fd = None + self._unregister_fd = None + + def stop(self): + self.call_soon(_raise_stop_error) + + def __repr__(self): + return ''.format( + id(self), len(self.readers), len(self.writers), + ) + + def fire_timers(self, min_delay=1, max_delay=10, max_timers=10, + propagate=()): + timer = self.timer + delay = None + if timer and timer._queue: + for i in range(max_timers): + delay, entry = next(self.scheduler) + if entry is None: + break + try: + entry() + except propagate: + raise + except (MemoryError, AssertionError): + raise + except OSError as exc: + if get_errno(exc) == errno.ENOMEM: + raise + logger.error('Error in timer: %r', exc, exc_info=1) + except Exception as exc: + logger.error('Error in timer: %r', exc, exc_info=1) + return min(delay or min_delay, max_delay) + + def _remove_from_loop(self, fd): + try: + self._unregister(fd) + finally: + self._discard(fd) + + def add(self, fd, callback, flags, args=(), consolidate=False): + fd = fileno(fd) + try: + self.poller.register(fd, flags) + except ValueError: + self._remove_from_loop(fd) + raise + else: + dest = self.readers if flags & READ else self.writers + if consolidate: + self.consolidate.add(fd) + dest[fd] = None + else: + dest[fd] = callback, args + + def remove(self, fd): + fd = fileno(fd) + self._remove_from_loop(fd) + + def run_forever(self): + self._running = True + try: + while 1: + try: + self.run_once() + except Stop: + break + finally: + self._running = False + + def run_once(self): + try: + next(self.loop) + except StopIteration: + self._loop = None + + def call_soon(self, callback, *args): + handle = promise(callback, args) + self._ready.append(handle) + return handle + + def call_later(self, delay, callback, *args): + return self.timer.call_after(delay, callback, args) + + def call_at(self, when, callback, *args): + return self.timer.call_at(when, callback, args) + + def call_repeatedly(self, delay, callback, *args): + return self.timer.call_repeatedly(delay, callback, args) + + def add_reader(self, fds, callback, *args): + return self.add(fds, callback, READ | ERR, args) + + def add_writer(self, fds, callback, *args): + return self.add(fds, callback, WRITE, args) + + def remove_reader(self, fd): + writable = fd in self.writers + on_write = self.writers.get(fd) + try: + self._remove_from_loop(fd) + finally: + if writable: + cb, args = on_write + 
self.add(fd, cb, WRITE, args) + + def remove_writer(self, fd): + readable = fd in self.readers + on_read = self.readers.get(fd) + try: + self._remove_from_loop(fd) + finally: + if readable: + cb, args = on_read + self.add(fd, cb, READ | ERR, args) + + def _unregister(self, fd): + try: + self.poller.unregister(fd) + except (AttributeError, KeyError, OSError): + pass + + def close(self, *args): + [self._unregister(fd) for fd in self.readers] + self.readers.clear() + [self._unregister(fd) for fd in self.writers] + self.writers.clear() + self.consolidate.clear() + self._close_poller() + for callback in self.on_close: + callback(self) + + def _discard(self, fd): + fd = fileno(fd) + self.readers.pop(fd, None) + self.writers.pop(fd, None) + self.consolidate.discard(fd) + + def create_loop(self, + generator=generator, sleep=sleep, min=min, next=next, + Empty=Empty, StopIteration=StopIteration, + KeyError=KeyError, READ=READ, WRITE=WRITE, ERR=ERR): + readers, writers = self.readers, self.writers + poll = self.poller.poll + fire_timers = self.fire_timers + hub_remove = self.remove + scheduled = self.timer._queue + consolidate = self.consolidate + consolidate_callback = self.consolidate_callback + on_tick = self.on_tick + todo = self._ready + propagate = self.propagate_errors + + while 1: + for tick_callback in on_tick: + tick_callback() + + while todo: + item = todo.popleft() + if item: + item() + + poll_timeout = fire_timers(propagate=propagate) if scheduled else 1 + if readers or writers: + to_consolidate = [] + try: + events = poll(poll_timeout) + except ValueError: # Issue 882 + raise StopIteration() + + for fd, event in events or (): + general_error = False + if fd in consolidate and \ + writers.get(fd) is None: + to_consolidate.append(fd) + continue + cb = cbargs = None + + if event & READ: + try: + cb, cbargs = readers[fd] + except KeyError: + self.remove_reader(fd) + continue + elif event & WRITE: + try: + cb, cbargs = writers[fd] + except KeyError: + self.remove_writer(fd) + continue + elif event & ERR: + general_error = True + else: + logger.info(W_UNKNOWN_EVENT, event, fd) + general_error = True + + if general_error: + try: + cb, cbargs = (readers.get(fd) or + writers.get(fd)) + except TypeError: + pass + + if cb is None: + self.remove(fd) + continue + + if isinstance(cb, generator): + try: + next(cb) + except OSError as exc: + if get_errno(exc) != errno.EBADF: + raise + hub_remove(fd) + except StopIteration: + pass + except Exception: + hub_remove(fd) + raise + else: + try: + cb(*cbargs) + except Empty: + pass + if to_consolidate: + consolidate_callback(to_consolidate) + else: + # no sockets yet, startup is probably not done. + sleep(min(poll_timeout, 0.1)) + yield + + def repr_active(self): + from .debug import repr_active + return repr_active(self) + + def repr_events(self, events): + from .debug import repr_events + return repr_events(self, events) + + @cached_property + def scheduler(self): + return iter(self.timer) + + @property + def loop(self): + if self._loop is None: + self._loop = self.create_loop() + return self._loop diff --git a/thesisenv/lib/python3.6/site-packages/kombu/async/semaphore.py b/thesisenv/lib/python3.6/site-packages/kombu/async/semaphore.py new file mode 100644 index 0000000..b446441 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/async/semaphore.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- +""" +kombu.async.semaphore +===================== + +Semaphores and concurrency primitives. 
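A rough sketch of driving the Hub defined above by hand (Celery's worker loop normally does this); the socketpair and callback are illustrative only, not part of the library:

    import socket
    from kombu.async import Hub

    hub = Hub()
    a, b = socket.socketpair()        # illustrative fds to watch

    def on_readable(sock):
        print('received:', sock.recv(64))

    hub.add_reader(a, on_readable, a)   # poll `a` for READ | ERR
    hub.call_soon(b.send, b'ping')      # queued, runs on the first tick
    hub.call_later(0.5, hub.stop)       # schedule Stop via the timer
    hub.run_forever()                   # exits once Stop is raised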
+ +""" +from __future__ import absolute_import + +from collections import deque + +__all__ = ['DummyLock', 'LaxBoundedSemaphore'] + + +class LaxBoundedSemaphore(object): + """Asynchronous Bounded Semaphore. + + Lax means that the value will stay within the specified + range even if released more times than it was acquired. + + Example: + + >>> from future import print_statement as printf + # ^ ignore: just fooling stupid pyflakes + + >>> x = LaxBoundedSemaphore(2) + + >>> x.acquire(printf, 'HELLO 1') + HELLO 1 + + >>> x.acquire(printf, 'HELLO 2') + HELLO 2 + + >>> x.acquire(printf, 'HELLO 3') + >>> x._waiters # private, do not access directly + [print, ('HELLO 3', )] + + >>> x.release() + HELLO 3 + + """ + + def __init__(self, value): + self.initial_value = self.value = value + self._waiting = deque() + self._add_waiter = self._waiting.append + self._pop_waiter = self._waiting.popleft + + def acquire(self, callback, *partial_args): + """Acquire semaphore, applying ``callback`` if + the resource is available. + + :param callback: The callback to apply. + :param \*partial_args: partial arguments to callback. + + """ + value = self.value + if value <= 0: + self._add_waiter((callback, partial_args)) + return False + else: + self.value = max(value - 1, 0) + callback(*partial_args) + return True + + def release(self): + """Release semaphore. + + If there are any waiters this will apply the first waiter + that is waiting for the resource (FIFO order). + + """ + try: + waiter, args = self._pop_waiter() + except IndexError: + self.value = min(self.value + 1, self.initial_value) + else: + waiter(*args) + + def grow(self, n=1): + """Change the size of the semaphore to accept more users.""" + self.initial_value += n + self.value += n + [self.release() for _ in range(n)] + + def shrink(self, n=1): + """Change the size of the semaphore to accept less users.""" + self.initial_value = max(self.initial_value - n, 0) + self.value = max(self.value - n, 0) + + def clear(self): + """Reset the semaphore, which also wipes out any waiting callbacks.""" + self._waiting.clear() + self.value = self.initial_value + + def __repr__(self): + return '<{0} at {1:#x} value:{2} waiting:{3}>'.format( + self.__class__.__name__, id(self), self.value, len(self._waiting), + ) + + +class DummyLock(object): + """Pretending to be a lock.""" + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + pass diff --git a/thesisenv/lib/python3.6/site-packages/kombu/async/timer.py b/thesisenv/lib/python3.6/site-packages/kombu/async/timer.py new file mode 100644 index 0000000..00f5412 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/async/timer.py @@ -0,0 +1,232 @@ +# -*- coding: utf-8 -*- +""" +kombu.async.timer +================= + +Timer scheduling Python callbacks. 
+ +""" +from __future__ import absolute_import + +import heapq +import sys + +from collections import namedtuple +from datetime import datetime +from functools import wraps +from time import time +from weakref import proxy as weakrefproxy + +from kombu.five import monotonic +from kombu.log import get_logger +from kombu.utils.compat import timedelta_seconds + +try: + from pytz import utc +except ImportError: + utc = None + +DEFAULT_MAX_INTERVAL = 2 +EPOCH = datetime.utcfromtimestamp(0).replace(tzinfo=utc) +IS_PYPY = hasattr(sys, 'pypy_version_info') + +logger = get_logger(__name__) + +__all__ = ['Entry', 'Timer', 'to_timestamp'] + +scheduled = namedtuple('scheduled', ('eta', 'priority', 'entry')) + + +def to_timestamp(d, default_timezone=utc): + if isinstance(d, datetime): + if d.tzinfo is None: + d = d.replace(tzinfo=default_timezone) + return timedelta_seconds(d - EPOCH) + return d + + +class Entry(object): + if not IS_PYPY: # pragma: no cover + __slots__ = ( + 'fun', 'args', 'kwargs', 'tref', 'cancelled', + '_last_run', '__weakref__', + ) + + def __init__(self, fun, args=None, kwargs=None): + self.fun = fun + self.args = args or [] + self.kwargs = kwargs or {} + self.tref = weakrefproxy(self) + self._last_run = None + self.cancelled = False + + def __call__(self): + return self.fun(*self.args, **self.kwargs) + + def cancel(self): + try: + self.tref.cancelled = True + except ReferenceError: # pragma: no cover + pass + + def __repr__(self): + return ' id(other) + + def __le__(self, other): + return id(self) <= id(other) + + def __ge__(self, other): + return id(self) >= id(other) + + def __eq__(self, other): + return hash(self) == hash(other) + + def __ne__(self, other): + return not self.__eq__(other) + + +class Timer(object): + """ETA scheduler.""" + Entry = Entry + + on_error = None + + def __init__(self, max_interval=None, on_error=None, **kwargs): + self.max_interval = float(max_interval or DEFAULT_MAX_INTERVAL) + self.on_error = on_error or self.on_error + self._queue = [] + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.stop() + + def call_at(self, eta, fun, args=(), kwargs={}, priority=0): + return self.enter_at(self.Entry(fun, args, kwargs), eta, priority) + + def call_after(self, secs, fun, args=(), kwargs={}, priority=0): + return self.enter_after(secs, self.Entry(fun, args, kwargs), priority) + + def call_repeatedly(self, secs, fun, args=(), kwargs={}, priority=0): + tref = self.Entry(fun, args, kwargs) + + @wraps(fun) + def _reschedules(*args, **kwargs): + last, now = tref._last_run, monotonic() + lsince = (now - tref._last_run) if last else secs + try: + if lsince and lsince >= secs: + tref._last_run = now + return fun(*args, **kwargs) + finally: + if not tref.cancelled: + last = tref._last_run + next = secs - (now - last) if last else secs + self.enter_after(next, tref, priority) + + tref.fun = _reschedules + tref._last_run = None + return self.enter_after(secs, tref, priority) + + def enter_at(self, entry, eta=None, priority=0, time=time): + """Enter function into the scheduler. + + :param entry: Item to enter. + :keyword eta: Scheduled time as a :class:`datetime.datetime` object. + :keyword priority: Unused. 
+ + """ + if eta is None: + eta = time() + if isinstance(eta, datetime): + try: + eta = to_timestamp(eta) + except Exception as exc: + if not self.handle_error(exc): + raise + return + return self._enter(eta, priority, entry) + + def enter_after(self, secs, entry, priority=0, time=time): + return self.enter_at(entry, time() + secs, priority) + + def _enter(self, eta, priority, entry, push=heapq.heappush): + push(self._queue, scheduled(eta, priority, entry)) + return entry + + def apply_entry(self, entry): + try: + entry() + except Exception as exc: + if not self.handle_error(exc): + logger.error('Error in timer: %r', exc, exc_info=True) + + def handle_error(self, exc_info): + if self.on_error: + self.on_error(exc_info) + return True + + def stop(self): + pass + + def __iter__(self, min=min, nowfun=time, + pop=heapq.heappop, push=heapq.heappush): + """This iterator yields a tuple of ``(entry, wait_seconds)``, + where if entry is :const:`None` the caller should wait + for ``wait_seconds`` until it polls the schedule again.""" + max_interval = self.max_interval + queue = self._queue + + while 1: + if queue: + eventA = queue[0] + now, eta = nowfun(), eventA[0] + + if now < eta: + yield min(eta - now, max_interval), None + else: + eventB = pop(queue) + + if eventB is eventA: + entry = eventA[2] + if not entry.cancelled: + yield None, entry + continue + else: + push(queue, eventB) + else: + yield None, None + + def clear(self): + self._queue[:] = [] # atomic, without creating a new list. + + def cancel(self, tref): + tref.cancel() + + def __len__(self): + return len(self._queue) + + def __nonzero__(self): + return True + + @property + def queue(self, _pop=heapq.heappop): + """Snapshot of underlying datastructure.""" + events = list(self._queue) + return [_pop(v) for v in [events] * len(events)] + + @property + def schedule(self): + return self diff --git a/thesisenv/lib/python3.6/site-packages/kombu/clocks.py b/thesisenv/lib/python3.6/site-packages/kombu/clocks.py new file mode 100644 index 0000000..e6de0c4 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/clocks.py @@ -0,0 +1,154 @@ +""" +kombu.clocks +============ + +Logical Clocks and Synchronization. + +""" +from __future__ import absolute_import + +from threading import Lock +from itertools import islice +from operator import itemgetter + +from .five import zip + +__all__ = ['LamportClock', 'timetuple'] + +R_CLOCK = '_lamport(clock={0}, timestamp={1}, id={2} {3!r})' + + +class timetuple(tuple): + """Tuple of event clock information. + + Can be used as part of a heap to keep events ordered. + + :param clock: Event clock value. + :param timestamp: Event UNIX timestamp value. + :param id: Event host id (e.g. ``hostname:pid``). + :param obj: Optional obj to associate with this event. + + """ + __slots__ = () + + def __new__(cls, clock, timestamp, id, obj=None): + return tuple.__new__(cls, (clock, timestamp, id, obj)) + + def __repr__(self): + return R_CLOCK.format(*self) + + def __getnewargs__(self): + return tuple(self) + + def __lt__(self, other): + # 0: clock 1: timestamp 3: process id + try: + A, B = self[0], other[0] + # uses logical clock value first + if A and B: # use logical clock if available + if A == B: # equal clocks use lower process id + return self[2] < other[2] + return A < B + return self[1] < other[1] # ... 
or use timestamp + except IndexError: + return NotImplemented + + def __gt__(self, other): + return other < self + + def __le__(self, other): + return not other < self + + def __ge__(self, other): + return not self < other + + clock = property(itemgetter(0)) + timestamp = property(itemgetter(1)) + id = property(itemgetter(2)) + obj = property(itemgetter(3)) + + +class LamportClock(object): + """Lamport's logical clock. + + From Wikipedia: + + A Lamport logical clock is a monotonically incrementing software counter + maintained in each process. It follows some simple rules: + + * A process increments its counter before each event in that process; + * When a process sends a message, it includes its counter value with + the message; + * On receiving a message, the receiver process sets its counter to be + greater than the maximum of its own value and the received value + before it considers the message received. + + Conceptually, this logical clock can be thought of as a clock that only + has meaning in relation to messages moving between processes. When a + process receives a message, it resynchronizes its logical clock with + the sender. + + .. seealso:: + + * `Lamport timestamps`_ + + * `Lamports distributed mutex`_ + + .. _`Lamport Timestamps`: http://en.wikipedia.org/wiki/Lamport_timestamps + .. _`Lamports distributed mutex`: http://bit.ly/p99ybE + + *Usage* + + When sending a message use :meth:`forward` to increment the clock, + when receiving a message use :meth:`adjust` to sync with + the time stamp of the incoming message. + + """ + #: The clocks current value. + value = 0 + + def __init__(self, initial_value=0, Lock=Lock): + self.value = initial_value + self.mutex = Lock() + + def adjust(self, other): + with self.mutex: + value = self.value = max(self.value, other) + 1 + return value + + def forward(self): + with self.mutex: + self.value += 1 + return self.value + + def sort_heap(self, h): + """List of tuples containing at least two elements, representing + an event, where the first element is the event's scalar clock value, + and the second element is the id of the process (usually + ``"hostname:pid"``): ``sh([(clock, processid, ...?), (...)])`` + + The list must already be sorted, which is why we refer to it as a + heap. + + The tuple will not be unpacked, so more than two elements can be + present. + + Will return the latest event. + + """ + if h[0][0] == h[1][0]: + same = [] + for PN in zip(h, islice(h, 1, None)): + if PN[0][0] != PN[1][0]: + break # Prev and Next's clocks differ + same.append(PN[0]) + # return first item sorted by process id + return sorted(same, key=lambda event: event[1])[0] + # clock values unique, return first item + return h[0] + + def __str__(self): + return str(self.value) + + def __repr__(self): + return ''.format(self) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/common.py b/thesisenv/lib/python3.6/site-packages/kombu/common.py new file mode 100644 index 0000000..540f321 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/common.py @@ -0,0 +1,407 @@ +""" +kombu.common +============ + +Common Utilities. 
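The logical clock above is used on both sides of a message exchange: tick with forward() before sending, resynchronise with adjust() on receipt. A minimal sketch (the message payload shape is made up):

    from kombu.clocks import LamportClock

    clock = LamportClock()

    # sending side: increment and attach the clock value to the message
    message = {'clock': clock.forward(), 'data': 'hello'}

    # receiving side: set own clock past the maximum of both values
    clock.adjust(message['clock'])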
+ +""" +from __future__ import absolute_import + +import os +import socket +import threading + +from collections import deque +from contextlib import contextmanager +from functools import partial +from itertools import count +from uuid import uuid4, uuid3, NAMESPACE_OID + +from amqp import RecoverableConnectionError + +from .entity import Exchange, Queue +from .five import range +from .log import get_logger +from .serialization import registry as serializers +from .utils import uuid + +try: + from _thread import get_ident +except ImportError: # pragma: no cover + try: # noqa + from thread import get_ident # noqa + except ImportError: # pragma: no cover + from dummy_thread import get_ident # noqa + +__all__ = ['Broadcast', 'maybe_declare', 'uuid', + 'itermessages', 'send_reply', + 'collect_replies', 'insured', 'drain_consumer', + 'eventloop'] + +#: Prefetch count can't exceed short. +PREFETCH_COUNT_MAX = 0xFFFF + +logger = get_logger(__name__) + +_node_id = None + + +def get_node_id(): + global _node_id + if _node_id is None: + _node_id = uuid4().int + return _node_id + + +def generate_oid(node_id, process_id, thread_id, instance): + ent = '%x-%x-%x-%x' % (node_id, process_id, thread_id, id(instance)) + return str(uuid3(NAMESPACE_OID, ent)) + + +def oid_from(instance): + return generate_oid(get_node_id(), os.getpid(), get_ident(), instance) + + +class Broadcast(Queue): + """Convenience class used to define broadcast queues. + + Every queue instance will have a unique name, + and both the queue and exchange is configured with auto deletion. + + :keyword name: This is used as the name of the exchange. + :keyword queue: By default a unique id is used for the queue + name for every consumer. You can specify a custom queue + name here. + :keyword \*\*kwargs: See :class:`~kombu.Queue` for a list + of additional keyword arguments supported. 
+ + """ + attrs = Queue.attrs + (('queue', None),) + + def __init__(self, name=None, queue=None, auto_delete=True, + exchange=None, alias=None, **kwargs): + queue = queue or 'bcast.%s' % (uuid(),) + return super(Broadcast, self).__init__( + alias=alias or name, + queue=queue, + name=queue, + auto_delete=auto_delete, + exchange=(exchange if exchange is not None + else Exchange(name, type='fanout')), + **kwargs + ) + + +def declaration_cached(entity, channel): + return entity in channel.connection.client.declared_entities + + +def maybe_declare(entity, channel=None, retry=False, **retry_policy): + is_bound = entity.is_bound + + if not is_bound: + assert channel + entity = entity.bind(channel) + + if channel is None: + assert is_bound + channel = entity.channel + + declared = ident = None + if channel.connection and entity.can_cache_declaration: + declared = channel.connection.client.declared_entities + ident = hash(entity) + if ident in declared: + return False + + if retry: + return _imaybe_declare(entity, declared, ident, + channel, **retry_policy) + return _maybe_declare(entity, declared, ident, channel) + + +def _maybe_declare(entity, declared, ident, channel): + channel = channel or entity.channel + if not channel.connection: + raise RecoverableConnectionError('channel disconnected') + entity.declare() + if declared is not None and ident: + declared.add(ident) + return True + + +def _imaybe_declare(entity, declared, ident, channel, **retry_policy): + return entity.channel.connection.client.ensure( + entity, _maybe_declare, **retry_policy)( + entity, declared, ident, channel) + + +def drain_consumer(consumer, limit=1, timeout=None, callbacks=None): + acc = deque() + + def on_message(body, message): + acc.append((body, message)) + + consumer.callbacks = [on_message] + (callbacks or []) + + with consumer: + for _ in eventloop(consumer.channel.connection.client, + limit=limit, timeout=timeout, ignore_timeouts=True): + try: + yield acc.popleft() + except IndexError: + pass + + +def itermessages(conn, channel, queue, limit=1, timeout=None, + callbacks=None, **kwargs): + return drain_consumer( + conn.Consumer(queues=[queue], channel=channel, **kwargs), + limit=limit, timeout=timeout, callbacks=callbacks, + ) + + +def eventloop(conn, limit=None, timeout=None, ignore_timeouts=False): + """Best practice generator wrapper around ``Connection.drain_events``. + + Able to drain events forever, with a limit, and optionally ignoring + timeout errors (a timeout of 1 is often used in environments where + the socket can get "stuck", and is a best practice for Kombu consumers). + + **Examples** + + ``eventloop`` is a generator:: + + from kombu.common import eventloop + + def run(connection): + it = eventloop(connection, timeout=1, ignore_timeouts=True) + next(it) # one event consumed, or timed out. + + for _ in eventloop(connection, timeout=1, ignore_timeouts=True): + pass # loop forever. + + It also takes an optional limit parameter, and timeout errors + are propagated by default:: + + for _ in eventloop(connection, limit=1, timeout=1): + pass + + .. seealso:: + + :func:`itermessages`, which is an event loop bound to one or more + consumers, that yields any messages received. + + """ + for i in limit and range(limit) or count(): + try: + yield conn.drain_events(timeout=timeout) + except socket.timeout: + if timeout and not ignore_timeouts: # pragma: no cover + raise + + +def send_reply(exchange, req, msg, + producer=None, retry=False, retry_policy=None, **props): + """Send reply for request. 
+ + :param exchange: Reply exchange + :param req: Original request, a message with a ``reply_to`` property. + :param producer: Producer instance + :param retry: If true must retry according to ``reply_policy`` argument. + :param retry_policy: Retry settings. + :param props: Extra properties + + """ + + producer.publish( + msg, exchange=exchange, + retry=retry, retry_policy=retry_policy, + **dict({'routing_key': req.properties['reply_to'], + 'correlation_id': req.properties.get('correlation_id'), + 'serializer': serializers.type_to_name[req.content_type], + 'content_encoding': req.content_encoding}, **props) + ) + + +def collect_replies(conn, channel, queue, *args, **kwargs): + """Generator collecting replies from ``queue``""" + no_ack = kwargs.setdefault('no_ack', True) + received = False + try: + for body, message in itermessages(conn, channel, queue, + *args, **kwargs): + if not no_ack: + message.ack() + received = True + yield body + finally: + if received: + channel.after_reply_message_received(queue.name) + + +def _ensure_errback(exc, interval): + logger.error( + 'Connection error: %r. Retry in %ss\n', exc, interval, + exc_info=True, + ) + + +@contextmanager +def _ignore_errors(conn): + try: + yield + except conn.connection_errors + conn.channel_errors: + pass + + +def ignore_errors(conn, fun=None, *args, **kwargs): + """Ignore connection and channel errors. + + The first argument must be a connection object, or any other object + with ``connection_error`` and ``channel_error`` attributes. + + Can be used as a function: + + .. code-block:: python + + def example(connection): + ignore_errors(connection, consumer.channel.close) + + or as a context manager: + + .. code-block:: python + + def example(connection): + with ignore_errors(connection): + consumer.channel.close() + + + .. note:: + + Connection and channel errors should be properly handled, + and not ignored. Using this function is only acceptable in a cleanup + phase, like when a connection is lost or at shutdown. + + """ + if fun: + with _ignore_errors(conn): + return fun(*args, **kwargs) + return _ignore_errors(conn) + + +def revive_connection(connection, channel, on_revive=None): + if on_revive: + on_revive(channel) + + +def insured(pool, fun, args, kwargs, errback=None, on_revive=None, **opts): + """Ensures function performing broker commands completes + despite intermittent connection failures.""" + errback = errback or _ensure_errback + + with pool.acquire(block=True) as conn: + conn.ensure_connection(errback=errback) + # we cache the channel for subsequent calls, this has to be + # reset on revival. + channel = conn.default_channel + revive = partial(revive_connection, conn, on_revive=on_revive) + insured = conn.autoretry(fun, channel, errback=errback, + on_revive=revive, **opts) + retval, _ = insured(*args, **dict(kwargs, connection=conn)) + return retval + + +class QoS(object): + """Thread safe increment/decrement of a channels prefetch_count. + + :param callback: Function used to set new prefetch count, + e.g. ``consumer.qos`` or ``channel.basic_qos``. Will be called + with a single ``prefetch_count`` keyword argument. + :param initial_value: Initial prefetch count value. + + **Example usage** + + .. code-block:: python + + >>> from kombu import Consumer, Connection + >>> connection = Connection('amqp://') + >>> consumer = Consumer(connection) + >>> qos = QoS(consumer.qos, initial_prefetch_count=2) + >>> qos.update() # set initial + + >>> qos.value + 2 + + >>> def in_some_thread(): + ... 
qos.increment_eventually() + + >>> def in_some_other_thread(): + ... qos.decrement_eventually() + + >>> while 1: + ... if qos.prev != qos.value: + ... qos.update() # prefetch changed so update. + + It can be used with any function supporting a ``prefetch_count`` keyword + argument:: + + >>> channel = connection.channel() + >>> QoS(channel.basic_qos, 10) + + + >>> def set_qos(prefetch_count): + ... print('prefetch count now: %r' % (prefetch_count, )) + >>> QoS(set_qos, 10) + + """ + prev = None + + def __init__(self, callback, initial_value): + self.callback = callback + self._mutex = threading.RLock() + self.value = initial_value or 0 + + def increment_eventually(self, n=1): + """Increment the value, but do not update the channels QoS. + + The MainThread will be responsible for calling :meth:`update` + when necessary. + + """ + with self._mutex: + if self.value: + self.value = self.value + max(n, 0) + return self.value + + def decrement_eventually(self, n=1): + """Decrement the value, but do not update the channels QoS. + + The MainThread will be responsible for calling :meth:`update` + when necessary. + + """ + with self._mutex: + if self.value: + self.value -= n + if self.value < 1: + self.value = 1 + return self.value + + def set(self, pcount): + """Set channel prefetch_count setting.""" + if pcount != self.prev: + new_value = pcount + if pcount > PREFETCH_COUNT_MAX: + logger.warn('QoS: Disabled: prefetch_count exceeds %r', + PREFETCH_COUNT_MAX) + new_value = 0 + logger.debug('basic.qos: prefetch_count->%s', new_value) + self.callback(prefetch_count=new_value) + self.prev = pcount + return pcount + + def update(self): + """Update prefetch count with current value.""" + with self._mutex: + return self.set(self.value) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/compat.py b/thesisenv/lib/python3.6/site-packages/kombu/compat.py new file mode 100644 index 0000000..7347e9b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/compat.py @@ -0,0 +1,215 @@ +""" +kombu.compat +============ + +Carrot compatible interface for :class:`Publisher` and :class:`Producer`. + +See http://packages.python.org/pypi/carrot for documentation. + +""" +from __future__ import absolute_import + +from itertools import count + +from . 
import messaging +from .entity import Exchange, Queue +from .five import items + +__all__ = ['Publisher', 'Consumer'] + +# XXX compat attribute +entry_to_queue = Queue.from_dict + + +def _iterconsume(connection, consumer, no_ack=False, limit=None): + consumer.consume(no_ack=no_ack) + for iteration in count(0): # for infinity + if limit and iteration >= limit: + raise StopIteration + yield connection.drain_events() + + +class Publisher(messaging.Producer): + exchange = '' + exchange_type = 'direct' + routing_key = '' + durable = True + auto_delete = False + _closed = False + + def __init__(self, connection, exchange=None, routing_key=None, + exchange_type=None, durable=None, auto_delete=None, + channel=None, **kwargs): + if channel: + connection = channel + + self.exchange = exchange or self.exchange + self.exchange_type = exchange_type or self.exchange_type + self.routing_key = routing_key or self.routing_key + + if auto_delete is not None: + self.auto_delete = auto_delete + if durable is not None: + self.durable = durable + + if not isinstance(self.exchange, Exchange): + self.exchange = Exchange(name=self.exchange, + type=self.exchange_type, + routing_key=self.routing_key, + auto_delete=self.auto_delete, + durable=self.durable) + super(Publisher, self).__init__(connection, self.exchange, **kwargs) + + def send(self, *args, **kwargs): + return self.publish(*args, **kwargs) + + def close(self): + super(Publisher, self).close() + self._closed = True + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + + @property + def backend(self): + return self.channel + + +class Consumer(messaging.Consumer): + queue = '' + exchange = '' + routing_key = '' + exchange_type = 'direct' + durable = True + exclusive = False + auto_delete = False + exchange_type = 'direct' + _closed = False + + def __init__(self, connection, queue=None, exchange=None, + routing_key=None, exchange_type=None, durable=None, + exclusive=None, auto_delete=None, **kwargs): + self.backend = connection.channel() + + if durable is not None: + self.durable = durable + if exclusive is not None: + self.exclusive = exclusive + if auto_delete is not None: + self.auto_delete = auto_delete + + self.queue = queue or self.queue + self.exchange = exchange or self.exchange + self.exchange_type = exchange_type or self.exchange_type + self.routing_key = routing_key or self.routing_key + + exchange = Exchange(self.exchange, + type=self.exchange_type, + routing_key=self.routing_key, + auto_delete=self.auto_delete, + durable=self.durable) + queue = Queue(self.queue, + exchange=exchange, + routing_key=self.routing_key, + durable=self.durable, + exclusive=self.exclusive, + auto_delete=self.auto_delete) + super(Consumer, self).__init__(self.backend, queue, **kwargs) + + def revive(self, channel): + self.backend = channel + super(Consumer, self).revive(channel) + + def close(self): + self.cancel() + self.backend.close() + self._closed = True + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + + def __iter__(self): + return self.iterqueue(infinite=True) + + def fetch(self, no_ack=None, enable_callbacks=False): + if no_ack is None: + no_ack = self.no_ack + message = self.queues[0].get(no_ack) + if message: + if enable_callbacks: + self.receive(message.payload, message) + return message + + def process_next(self): + raise NotImplementedError('Use fetch(enable_callbacks=True)') + + def discard_all(self, filterfunc=None): + if filterfunc is not None: + raise NotImplementedError( + 
'discard_all does not implement filters') + return self.purge() + + def iterconsume(self, limit=None, no_ack=None): + return _iterconsume(self.connection, self, no_ack, limit) + + def wait(self, limit=None): + it = self.iterconsume(limit) + return list(it) + + def iterqueue(self, limit=None, infinite=False): + for items_since_start in count(): # for infinity + item = self.fetch() + if (not infinite and item is None) or \ + (limit and items_since_start >= limit): + raise StopIteration + yield item + + +class ConsumerSet(messaging.Consumer): + + def __init__(self, connection, from_dict=None, consumers=None, + channel=None, **kwargs): + if channel: + self._provided_channel = True + self.backend = channel + else: + self._provided_channel = False + self.backend = connection.channel() + + queues = [] + if consumers: + for consumer in consumers: + queues.extend(consumer.queues) + if from_dict: + for queue_name, queue_options in items(from_dict): + queues.append(Queue.from_dict(queue_name, **queue_options)) + + super(ConsumerSet, self).__init__(self.backend, queues, **kwargs) + + def iterconsume(self, limit=None, no_ack=False): + return _iterconsume(self.connection, self, no_ack, limit) + + def discard_all(self): + return self.purge() + + def add_consumer_from_dict(self, queue, **options): + return self.add_queue_from_dict(queue, **options) + + def add_consumer(self, consumer): + for queue in consumer.queues: + self.add_queue(queue) + + def revive(self, channel): + self.backend = channel + super(ConsumerSet, self).revive(channel) + + def close(self): + self.cancel() + if not self._provided_channel: + self.channel.close() diff --git a/thesisenv/lib/python3.6/site-packages/kombu/compression.py b/thesisenv/lib/python3.6/site-packages/kombu/compression.py new file mode 100644 index 0000000..866433d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/compression.py @@ -0,0 +1,83 @@ +""" +kombu.compression +================= + +Compression utilities. + +""" +from __future__ import absolute_import + +from kombu.utils.encoding import ensure_bytes + +import zlib + +_aliases = {} +_encoders = {} +_decoders = {} + +__all__ = ['register', 'encoders', 'get_encoder', + 'get_decoder', 'compress', 'decompress'] + + +def register(encoder, decoder, content_type, aliases=[]): + """Register new compression method. + + :param encoder: Function used to compress text. + :param decoder: Function used to decompress previously compressed text. + :param content_type: The mime type this compression method identifies as. + :param aliases: A list of names to associate with this compression method. + + """ + _encoders[content_type] = encoder + _decoders[content_type] = decoder + _aliases.update((alias, content_type) for alias in aliases) + + +def encoders(): + """Return a list of available compression methods.""" + return list(_encoders) + + +def get_encoder(t): + """Get encoder by alias name.""" + t = _aliases.get(t, t) + return _encoders[t], t + + +def get_decoder(t): + """Get decoder by alias name.""" + return _decoders[_aliases.get(t, t)] + + +def compress(body, content_type): + """Compress text. + + :param body: The text to compress. + :param content_type: mime-type of compression method to use. + + """ + encoder, content_type = get_encoder(content_type) + return encoder(ensure_bytes(body)), content_type + + +def decompress(body, content_type): + """Decompress compressed text. + + :param body: Previously compressed text to uncompress. + :param content_type: mime-type of compression method used. 
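The carrot-style classes above wrap the newer Producer/Consumer API; a minimal sketch of using them (broker URL, exchange and queue names are illustrative):

    from kombu import Connection
    from kombu.compat import Publisher, Consumer

    connection = Connection('amqp://guest:guest@localhost//')

    with Publisher(connection, exchange='feed', routing_key='importer') as pub:
        pub.send({'import_feed': 'http://example.com/rss'})

    with Consumer(connection, queue='feed', exchange='feed',
                  routing_key='importer') as consumer:
        message = consumer.fetch()        # returns a Message or None
        if message is not None:
            print(message.payload)
            message.ack()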
+ + """ + return get_decoder(content_type)(body) + + +register(zlib.compress, + zlib.decompress, + 'application/x-gzip', aliases=['gzip', 'zlib']) +try: + import bz2 +except ImportError: + pass # Jython? +else: + register(bz2.compress, + bz2.decompress, + 'application/x-bz2', aliases=['bzip2', 'bzip']) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/connection.py b/thesisenv/lib/python3.6/site-packages/kombu/connection.py new file mode 100644 index 0000000..d979338 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/connection.py @@ -0,0 +1,1074 @@ +""" +kombu.connection +================ + +Broker connection and pools. + +""" +from __future__ import absolute_import + +import os +import socket + +from contextlib import contextmanager +from itertools import count, cycle +from operator import itemgetter + +# jython breaks on relative import for .exceptions for some reason +# (Issue #112) +from kombu import exceptions +from .five import Empty, range, string_t, text_t, LifoQueue as _LifoQueue +from .log import get_logger +from .transport import get_transport_cls, supports_librabbitmq +from .utils import cached_property, retry_over_time, shufflecycle, HashedSeq +from .utils.compat import OrderedDict +from .utils.functional import lazy +from .utils.url import as_url, parse_url, quote, urlparse + +__all__ = ['Connection', 'ConnectionPool', 'ChannelPool'] + +RESOLVE_ALIASES = {'pyamqp': 'amqp', + 'librabbitmq': 'amqp'} + +_LOG_CONNECTION = os.environ.get('KOMBU_LOG_CONNECTION', False) +_LOG_CHANNEL = os.environ.get('KOMBU_LOG_CHANNEL', False) + +logger = get_logger(__name__) +roundrobin_failover = cycle + +failover_strategies = { + 'round-robin': roundrobin_failover, + 'shuffle': shufflecycle, +} + + +class Connection(object): + """A connection to the broker. + + :param URL: Broker URL, or a list of URLs, e.g. + + .. code-block:: python + + Connection('amqp://guest:guest@localhost:5672//') + Connection('amqp://foo;amqp://bar', failover_strategy='round-robin') + Connection('redis://', transport_options={ + 'visibility_timeout': 3000, + }) + + import ssl + Connection('amqp://', login_method='EXTERNAL', ssl={ + 'ca_certs': '/etc/pki/tls/certs/something.crt', + 'keyfile': '/etc/something/system.key', + 'certfile': '/etc/something/system.cert', + 'cert_reqs': ssl.CERT_REQUIRED, + }) + + .. admonition:: SSL compatibility + + SSL currently only works with the py-amqp, amqplib, and qpid + transports. For other transports you can use stunnel. + + :keyword ssl: Use SSL to connect to the server. Default is ``False``. + May not be supported by the specified transport. + :keyword transport: Default transport if not specified in the URL. + :keyword connect_timeout: Timeout in seconds for connecting to the + server. May not be supported by the specified transport. + :keyword transport_options: A dict of additional connection arguments to + pass to alternate kombu channel implementations. Consult the transport + documentation for available options. + :keyword heartbeat: Heartbeat interval in int/float seconds. + Note that if heartbeats are enabled then the :meth:`heartbeat_check` + method must be called regularly, around once per second. + + .. note:: + + The connection is established lazily when needed. 
If you need the + connection to be established, then force it by calling + :meth:`connect`:: + + >>> conn = Connection('amqp://') + >>> conn.connect() + + and always remember to close the connection:: + + >>> conn.release() + + *Legacy options* + + These options have been replaced by the URL argument, but are still + supported for backwards compatibility: + + :keyword hostname: Host name/address. + NOTE: You cannot specify both the URL argument and use the hostname + keyword argument at the same time. + :keyword userid: Default user name if not provided in the URL. + :keyword password: Default password if not provided in the URL. + :keyword virtual_host: Default virtual host if not provided in the URL. + :keyword port: Default port if not provided in the URL. + + """ + port = None + virtual_host = '/' + connect_timeout = 5 + + _closed = None + _connection = None + _default_channel = None + _transport = None + _logger = False + uri_prefix = None + + #: The cache of declared entities is per connection, + #: in case the server loses data. + declared_entities = None + + #: Iterator returning the next broker URL to try in the event + #: of connection failure (initialized by :attr:`failover_strategy`). + cycle = None + + #: Additional transport specific options, + #: passed on to the transport instance. + transport_options = None + + #: Strategy used to select new hosts when reconnecting after connection + #: failure. One of "round-robin", "shuffle" or any custom iterator + #: constantly yielding new URLs to try. + failover_strategy = 'round-robin' + + #: Map of failover strategy name to Callable + failover_strategies = failover_strategies + + #: Heartbeat value, currently only supported by the py-amqp transport. + heartbeat = None + + + hostname = userid = password = ssl = login_method = None + + def __init__(self, hostname='localhost', userid=None, + password=None, virtual_host=None, port=None, insist=False, + ssl=False, transport=None, connect_timeout=5, + transport_options=None, login_method=None, uri_prefix=None, + heartbeat=0, failover_strategy='round-robin', + alternates=None, **kwargs): + alt = [] if alternates is None else alternates + # have to spell the args out, just to get nice docstrings :( + params = self._initial_params = { + 'hostname': hostname, 'userid': userid, + 'password': password, 'virtual_host': virtual_host, + 'port': port, 'insist': insist, 'ssl': ssl, + 'transport': transport, 'connect_timeout': connect_timeout, + 'login_method': login_method, 'heartbeat': heartbeat + } + + if hostname and not isinstance(hostname, string_t): + alt.extend(hostname) + hostname = alt[0] + if hostname and '://' in hostname: + if ';' in hostname: + alt.extend(hostname.split(';')) + hostname = alt[0] + if '+' in hostname[:hostname.index('://')]: + # e.g. 
sqla+mysql://root:masterkey@localhost/ + params['transport'], params['hostname'] = \ + hostname.split('+', 1) + transport = self.uri_prefix = params['transport'] + else: + transport = transport or urlparse(hostname).scheme + if not get_transport_cls(transport).can_parse_url: + # we must parse the URL + params.update(parse_url(hostname)) + + params['transport'] = transport + + self._init_params(**params) + + # fallback hosts + self.alt = alt + self._failover_strategy_arg = failover_strategy or 'round-robin' + self.failover_strategy = self.failover_strategies.get( + self._failover_strategy_arg) or failover_strategy + if self.alt: + self.cycle = self.failover_strategy(self.alt) + next(self.cycle) # skip first entry + + if transport_options is None: + transport_options = {} + self.transport_options = transport_options + + if _LOG_CONNECTION: # pragma: no cover + self._logger = True + + if uri_prefix: + self.uri_prefix = uri_prefix + + self.declared_entities = set() + + def switch(self, url): + """Switch connection parameters to use a new URL (does not + reconnect)""" + self.close() + self.declared_entities.clear() + self._closed = False + self._init_params(**dict(self._initial_params, **parse_url(url))) + + def maybe_switch_next(self): + """Switch to next URL given by the current failover strategy (if + any).""" + if self.cycle: + self.switch(next(self.cycle)) + + def _init_params(self, hostname, userid, password, virtual_host, port, + insist, ssl, transport, connect_timeout, + login_method, heartbeat): + transport = transport or 'amqp' + if transport == 'amqp' and supports_librabbitmq(): + transport = 'librabbitmq' + self.hostname = hostname + self.userid = userid + self.password = password + self.login_method = login_method + self.virtual_host = virtual_host or self.virtual_host + self.port = port or self.port + self.insist = insist + self.connect_timeout = connect_timeout + self.ssl = ssl + self.transport_cls = transport + self.heartbeat = heartbeat and float(heartbeat) + + def register_with_event_loop(self, loop): + self.transport.register_with_event_loop(self.connection, loop) + + def _debug(self, msg, *args, **kwargs): + if self._logger: # pragma: no cover + fmt = '[Kombu connection:0x{id:x}] {msg}' + logger.debug(fmt.format(id=id(self), msg=text_t(msg)), + *args, **kwargs) + + def connect(self): + """Establish connection to server immediately.""" + self._closed = False + return self.connection + + def channel(self): + """Create and return a new channel.""" + self._debug('create channel') + chan = self.transport.create_channel(self.connection) + if _LOG_CHANNEL: # pragma: no cover + from .utils.debug import Logwrapped + return Logwrapped(chan, 'kombu.channel', + '[Kombu channel:{0.channel_id}] ') + return chan + + def heartbeat_check(self, rate=2): + """Allow the transport to perform any periodic tasks + required to make heartbeats work. This should be called + approximately every second. + + If the current transport does not support heartbeats then + this is a noop operation. + + :keyword rate: Rate is how often the tick is called + compared to the actual heartbeat value. E.g. if + the heartbeat is set to 3 seconds, and the tick + is called every 3 / 2 seconds, then the rate is 2. + This value is currently unused by any transports. + + """ + return self.transport.heartbeat_check(self.connection, rate=rate) + + def drain_events(self, **kwargs): + """Wait for a single event from the server. + + :keyword timeout: Timeout in seconds before we give up. 
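Putting the two methods above together, an application that enables heartbeats has to keep ticking the connection roughly once per second; a sketch (broker URL and intervals are illustrative):

    import socket
    from kombu import Connection

    conn = Connection('amqp://guest:guest@localhost//', heartbeat=10)
    conn.connect()
    while True:
        try:
            conn.drain_events(timeout=1)   # handle at most one event per pass
        except socket.timeout:
            pass
        conn.heartbeat_check()             # must run about once a second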
+ + + :raises :exc:`socket.timeout`: if the timeout is exceeded. + + """ + return self.transport.drain_events(self.connection, **kwargs) + + def maybe_close_channel(self, channel): + """Close given channel, but ignore connection and channel errors.""" + try: + channel.close() + except (self.connection_errors + self.channel_errors): + pass + + def _do_close_self(self): + # Close only connection and channel(s), but not transport. + self.declared_entities.clear() + if self._default_channel: + self.maybe_close_channel(self._default_channel) + if self._connection: + try: + self.transport.close_connection(self._connection) + except self.connection_errors + (AttributeError, socket.error): + pass + self._connection = None + + def _close(self): + """Really close connection, even if part of a connection pool.""" + self._do_close_self() + if self._transport: + self._transport.client = None + self._transport = None + self._debug('closed') + self._closed = True + + def collect(self, socket_timeout=None): + # amqp requires communication to close, we don't need that just + # to clear out references, Transport._collect can also be implemented + # by other transports that want fast after fork + try: + gc_transport = self._transport._collect + except AttributeError: + _timeo = socket.getdefaulttimeout() + socket.setdefaulttimeout(socket_timeout) + try: + self._close() + except socket.timeout: + pass + finally: + socket.setdefaulttimeout(_timeo) + else: + gc_transport(self._connection) + if self._transport: + self._transport.client = None + self._transport = None + self.declared_entities.clear() + self._connection = None + + def release(self): + """Close the connection (if open).""" + self._close() + close = release + + def ensure_connection(self, errback=None, max_retries=None, + interval_start=2, interval_step=2, interval_max=30, + callback=None): + """Ensure we have a connection to the server. + + If not retry establishing the connection with the settings + specified. + + :keyword errback: Optional callback called each time the connection + can't be established. Arguments provided are the exception + raised and the interval that will be slept ``(exc, interval)``. + + :keyword max_retries: Maximum number of times to retry. + If this limit is exceeded the connection error will be re-raised. + + :keyword interval_start: The number of seconds we start sleeping for. + :keyword interval_step: How many seconds added to the interval + for each retry. + :keyword interval_max: Maximum number of seconds to sleep between + each retry. 
+ :keyword callback: Optional callback that is called for every + internal iteration (1 s) + + """ + def on_error(exc, intervals, retries, interval=0): + round = self.completes_cycle(retries) + if round: + interval = next(intervals) + if errback: + errback(exc, interval) + self.maybe_switch_next() # select next host + + return interval if round else 0 + + retry_over_time(self.connect, self.recoverable_connection_errors, + (), {}, on_error, max_retries, + interval_start, interval_step, interval_max, callback) + return self + + def completes_cycle(self, retries): + """Return true if the cycle is complete after number of `retries`.""" + return not (retries + 1) % len(self.alt) if self.alt else True + + def revive(self, new_channel): + """Revive connection after connection re-established.""" + if self._default_channel: + self.maybe_close_channel(self._default_channel) + self._default_channel = None + + def _default_ensure_callback(self, exc, interval): + logger.error("Ensure: Operation error: %r. Retry in %ss", + exc, interval, exc_info=True) + + def ensure(self, obj, fun, errback=None, max_retries=None, + interval_start=1, interval_step=1, interval_max=1, + on_revive=None): + """Ensure operation completes, regardless of any channel/connection + errors occurring. + + Will retry by establishing the connection, and reapplying + the function. + + :param fun: Method to apply. + + :keyword errback: Optional callback called each time the connection + can't be established. Arguments provided are the exception + raised and the interval that will be slept ``(exc, interval)``. + + :keyword max_retries: Maximum number of times to retry. + If this limit is exceeded the connection error will be re-raised. + + :keyword interval_start: The number of seconds we start sleeping for. + :keyword interval_step: How many seconds added to the interval + for each retry. + :keyword interval_max: Maximum number of seconds to sleep between + each retry. + + **Example** + + This is an example ensuring a publish operation:: + + >>> from kombu import Connection, Producer + >>> conn = Connection('amqp://') + >>> producer = Producer(conn) + + >>> def errback(exc, interval): + ... logger.error('Error: %r', exc, exc_info=1) + ... logger.info('Retry in %s seconds.', interval) + + >>> publish = conn.ensure(producer, producer.publish, + ... errback=errback, max_retries=3) + >>> publish({'hello': 'world'}, routing_key='dest') + + """ + def _ensured(*args, **kwargs): + got_connection = 0 + conn_errors = self.recoverable_connection_errors + chan_errors = self.recoverable_channel_errors + has_modern_errors = hasattr( + self.transport, 'recoverable_connection_errors', + ) + for retries in count(0): # for infinity + try: + return fun(*args, **kwargs) + except conn_errors as exc: + if got_connection and not has_modern_errors: + # transport can not distinguish between + # recoverable/irrecoverable errors, so we propagate + # the error if it persists after a new connection was + # successfully established. 
+ raise + if max_retries is not None and retries > max_retries: + raise + self._debug('ensure connection error: %r', exc, exc_info=1) + self._connection = None + self._do_close_self() + errback and errback(exc, 0) + remaining_retries = None + if max_retries is not None: + remaining_retries = max(max_retries - retries, 1) + self.ensure_connection(errback, + remaining_retries, + interval_start, + interval_step, + interval_max) + new_channel = self.channel() + self.revive(new_channel) + obj.revive(new_channel) + if on_revive: + on_revive(new_channel) + got_connection += 1 + except chan_errors as exc: + if max_retries is not None and retries > max_retries: + raise + self._debug('ensure channel error: %r', exc, exc_info=1) + errback and errback(exc, 0) + _ensured.__name__ = "%s(ensured)" % fun.__name__ + _ensured.__doc__ = fun.__doc__ + _ensured.__module__ = fun.__module__ + return _ensured + + def autoretry(self, fun, channel=None, **ensure_options): + """Decorator for functions supporting a ``channel`` keyword argument. + + The resulting callable will retry calling the function if + it raises connection or channel related errors. + The return value will be a tuple of ``(retval, last_created_channel)``. + + If a ``channel`` is not provided, then one will be automatically + acquired (remember to close it afterwards). + + See :meth:`ensure` for the full list of supported keyword arguments. + + Example usage:: + + channel = connection.channel() + try: + ret, channel = connection.autoretry(publish_messages, channel) + finally: + channel.close() + """ + channels = [channel] + create_channel = self.channel + + class Revival(object): + __name__ = getattr(fun, '__name__', None) + __module__ = getattr(fun, '__module__', None) + __doc__ = getattr(fun, '__doc__', None) + + def revive(self, channel): + channels[0] = channel + + def __call__(self, *args, **kwargs): + if channels[0] is None: + self.revive(create_channel()) + return fun(*args, channel=channels[0], **kwargs), channels[0] + + revive = Revival() + return self.ensure(revive, revive, **ensure_options) + + def create_transport(self): + return self.get_transport_cls()(client=self) + + def get_transport_cls(self): + """Get the currently used transport class.""" + transport_cls = self.transport_cls + if not transport_cls or isinstance(transport_cls, string_t): + transport_cls = get_transport_cls(transport_cls) + return transport_cls + + def clone(self, **kwargs): + """Create a copy of the connection with the same connection + settings.""" + return self.__class__(**dict(self._info(resolve=False), **kwargs)) + + def get_heartbeat_interval(self): + return self.transport.get_heartbeat_interval(self.connection) + + def _info(self, resolve=True): + transport_cls = self.transport_cls + if resolve: + transport_cls = RESOLVE_ALIASES.get(transport_cls, transport_cls) + D = self.transport.default_connection_params + + hostname = self.hostname or D.get('hostname') + if self.uri_prefix: + hostname = '%s+%s' % (self.uri_prefix, hostname) + + info = ( + ('hostname', hostname), + ('userid', self.userid or D.get('userid')), + ('password', self.password or D.get('password')), + ('virtual_host', self.virtual_host or D.get('virtual_host')), + ('port', self.port or D.get('port')), + ('insist', self.insist), + ('ssl', self.ssl), + ('transport', transport_cls), + ('connect_timeout', self.connect_timeout), + ('transport_options', self.transport_options), + ('login_method', self.login_method or D.get('login_method')), + ('uri_prefix', self.uri_prefix), + ('heartbeat', 
self.heartbeat), + ('failover_strategy', self._failover_strategy_arg), + ('alternates', self.alt), + ) + return info + + def info(self): + """Get connection info.""" + return OrderedDict(self._info()) + + def __eqhash__(self): + return HashedSeq(self.transport_cls, self.hostname, self.userid, + self.password, self.virtual_host, self.port, + repr(self.transport_options)) + + def as_uri(self, include_password=False, mask='**', + getfields=itemgetter('port', 'userid', 'password', + 'virtual_host', 'transport')): + """Convert connection parameters to URL form.""" + hostname = self.hostname or 'localhost' + if self.transport.can_parse_url: + if self.uri_prefix: + return '%s+%s' % (self.uri_prefix, hostname) + return self.hostname + if self.uri_prefix: + return '%s+%s' % (self.uri_prefix, hostname) + fields = self.info() + port, userid, password, vhost, transport = getfields(fields) + + return as_url( + transport, hostname, port, userid, password, quote(vhost), + sanitize=not include_password, mask=mask, + ) + + def Pool(self, limit=None, preload=None): + """Pool of connections. + + See :class:`ConnectionPool`. + + :keyword limit: Maximum number of active connections. + Default is no limit. + :keyword preload: Number of connections to preload + when the pool is created. Default is 0. + + *Example usage*:: + + >>> connection = Connection('amqp://') + >>> pool = connection.Pool(2) + >>> c1 = pool.acquire() + >>> c2 = pool.acquire() + >>> c3 = pool.acquire() + Traceback (most recent call last): + File "", line 1, in + File "kombu/connection.py", line 354, in acquire + raise ConnectionLimitExceeded(self.limit) + kombu.exceptions.ConnectionLimitExceeded: 2 + >>> c1.release() + >>> c3 = pool.acquire() + + """ + return ConnectionPool(self, limit, preload) + + def ChannelPool(self, limit=None, preload=None): + """Pool of channels. + + See :class:`ChannelPool`. + + :keyword limit: Maximum number of active channels. + Default is no limit. + :keyword preload: Number of channels to preload + when the pool is created. Default is 0. + + *Example usage*:: + + >>> connection = Connection('amqp://') + >>> pool = connection.ChannelPool(2) + >>> c1 = pool.acquire() + >>> c2 = pool.acquire() + >>> c3 = pool.acquire() + Traceback (most recent call last): + File "", line 1, in + File "kombu/connection.py", line 354, in acquire + raise ChannelLimitExceeded(self.limit) + kombu.connection.ChannelLimitExceeded: 2 + >>> c1.release() + >>> c3 = pool.acquire() + + """ + return ChannelPool(self, limit, preload) + + def Producer(self, channel=None, *args, **kwargs): + """Create new :class:`kombu.Producer` instance using this + connection.""" + from .messaging import Producer + return Producer(channel or self, *args, **kwargs) + + def Consumer(self, queues=None, channel=None, *args, **kwargs): + """Create new :class:`kombu.Consumer` instance using this + connection.""" + from .messaging import Consumer + return Consumer(channel or self, queues, *args, **kwargs) + + def SimpleQueue(self, name, no_ack=None, queue_opts=None, + exchange_opts=None, channel=None, **kwargs): + """Create new :class:`~kombu.simple.SimpleQueue`, using a channel + from this connection. + + If ``name`` is a string, a queue and exchange will be automatically + created using that name as the name of the queue and exchange, + also it will be used as the default routing key. + + :param name: Name of the queue/or a :class:`~kombu.Queue`. + :keyword no_ack: Disable acknowledgements. Default is false. 
+ :keyword queue_opts: Additional keyword arguments passed to the + constructor of the automatically created + :class:`~kombu.Queue`. + :keyword exchange_opts: Additional keyword arguments passed to the + constructor of the automatically created + :class:`~kombu.Exchange`. + :keyword channel: Custom channel to use. If not specified the + connection default channel is used. + + """ + from .simple import SimpleQueue + return SimpleQueue(channel or self, name, no_ack, queue_opts, + exchange_opts, **kwargs) + + def SimpleBuffer(self, name, no_ack=None, queue_opts=None, + exchange_opts=None, channel=None, **kwargs): + """Create new :class:`~kombu.simple.SimpleQueue` using a channel + from this connection. + + Same as :meth:`SimpleQueue`, but configured with buffering + semantics. The resulting queue and exchange will not be durable, also + auto delete is enabled. Messages will be transient (not persistent), + and acknowledgements are disabled (``no_ack``). + + """ + from .simple import SimpleBuffer + return SimpleBuffer(channel or self, name, no_ack, queue_opts, + exchange_opts, **kwargs) + + def _establish_connection(self): + self._debug('establishing connection...') + conn = self.transport.establish_connection() + self._debug('connection established: %r', conn) + return conn + + def __repr__(self): + """``x.__repr__() <==> repr(x)``""" + return ''.format(self.as_uri(), id(self)) + + def __copy__(self): + """``x.__copy__() <==> copy(x)``""" + return self.clone() + + def __reduce__(self): + return self.__class__, tuple(self.info().values()), None + + def __enter__(self): + return self + + def __exit__(self, *args): + self.release() + + @property + def qos_semantics_matches_spec(self): + return self.transport.qos_semantics_matches_spec(self.connection) + + @property + def connected(self): + """Return true if the connection has been established.""" + return (not self._closed and + self._connection is not None and + self.transport.verify_connection(self._connection)) + + @property + def connection(self): + """The underlying connection object. + + .. warning:: + This instance is transport specific, so do not + depend on the interface of this object. + + """ + if not self._closed: + if not self.connected: + self.declared_entities.clear() + self._default_channel = None + self._connection = self._establish_connection() + self._closed = False + return self._connection + + @property + def default_channel(self): + """Default channel, created upon access and closed when the connection + is closed. + + Can be used for automatic channel handling when you only need one + channel, and also it is the channel implicitly used if a connection + is passed instead of a channel, to functions that require a channel. + + """ + # make sure we're still connected, and if not refresh. + self.connection + if self._default_channel is None: + self._default_channel = self.channel() + return self._default_channel + + @property + def host(self): + """The host as a host name/port pair separated by colon.""" + return ':'.join([self.hostname, str(self.port)]) + + @property + def transport(self): + if self._transport is None: + self._transport = self.create_transport() + return self._transport + + @cached_property + def manager(self): + """Experimental manager that can be used to manage/monitor the broker + instance. 
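For orientation, a minimal usage sketch of the SimpleQueue helper and the lazily created default_channel shown above; the broker URL and queue name are placeholders, not values taken from this patch:

    from kombu import Connection

    # Broker URL is an assumption; any transport kombu supports would do.
    with Connection('amqp://guest:guest@localhost//') as conn:
        # SimpleQueue declares a queue/exchange pair on the connection's
        # default channel and exposes a simple put/get interface.
        simple = conn.SimpleQueue('thesis-tasks')   # queue name is made up
        simple.put({'hello': 'world'})
        msg = simple.get(block=True, timeout=5)
        print(msg.payload)       # decoded body
        msg.ack()
        simple.close()
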
Not available for all transports.""" + return self.transport.manager + + def get_manager(self, *args, **kwargs): + return self.transport.get_manager(*args, **kwargs) + + @cached_property + def recoverable_connection_errors(self): + """List of connection related exceptions that can be recovered from, + but where the connection must be closed and re-established first.""" + try: + return self.transport.recoverable_connection_errors + except AttributeError: + # There were no such classification before, + # and all errors were assumed to be recoverable, + # so this is a fallback for transports that do + # not support the new recoverable/irrecoverable classes. + return self.connection_errors + self.channel_errors + + @cached_property + def recoverable_channel_errors(self): + """List of channel related exceptions that can be automatically + recovered from without re-establishing the connection.""" + try: + return self.transport.recoverable_channel_errors + except AttributeError: + return () + + @cached_property + def connection_errors(self): + """List of exceptions that may be raised by the connection.""" + return self.transport.connection_errors + + @cached_property + def channel_errors(self): + """List of exceptions that may be raised by the channel.""" + return self.transport.channel_errors + + @property + def supports_heartbeats(self): + return self.transport.supports_heartbeats + + @property + def is_evented(self): + return self.transport.supports_ev +BrokerConnection = Connection + + +class Resource(object): + LimitExceeded = exceptions.LimitExceeded + + def __init__(self, limit=None, preload=None): + self.limit = limit + self.preload = preload or 0 + self._closed = False + + self._resource = _LifoQueue() + self._dirty = set() + self.setup() + + def setup(self): + raise NotImplementedError('subclass responsibility') + + def _add_when_empty(self): + if self.limit and len(self._dirty) >= self.limit: + raise self.LimitExceeded(self.limit) + # All taken, put new on the queue and + # try get again, this way the first in line + # will get the resource. + self._resource.put_nowait(self.new()) + + def acquire(self, block=False, timeout=None): + """Acquire resource. + + :keyword block: If the limit is exceeded, + block until there is an available item. + :keyword timeout: Timeout to wait + if ``block`` is true. Default is :const:`None` (forever). + + :raises LimitExceeded: if block is false + and the limit has been exceeded. + + """ + if self._closed: + raise RuntimeError('Acquire on closed pool') + if self.limit: + while 1: + try: + R = self._resource.get(block=block, timeout=timeout) + except Empty: + self._add_when_empty() + else: + try: + R = self.prepare(R) + except BaseException: + if isinstance(R, lazy): + # no evaluated yet, just put it back + self._resource.put_nowait(R) + else: + # evaluted so must try to release/close first. + self.release(R) + raise + self._dirty.add(R) + break + else: + R = self.prepare(self.new()) + + def release(): + """Release resource so it can be used by another thread. + + The caller is responsible for discarding the object, + and to never use the resource again. A new resource must + be acquired if so needed. + + """ + self.release(R) + R.release = release + + return R + + def prepare(self, resource): + return resource + + def close_resource(self, resource): + resource.close() + + def release_resource(self, resource): + pass + + def replace(self, resource): + """Replace resource with a new instance. 
This can be used in case + of defective resources.""" + if self.limit: + self._dirty.discard(resource) + self.close_resource(resource) + + def release(self, resource): + if self.limit: + self._dirty.discard(resource) + self._resource.put_nowait(resource) + self.release_resource(resource) + else: + self.close_resource(resource) + + def collect_resource(self, resource): + pass + + def force_close_all(self): + """Close and remove all resources in the pool (also those in use). + + Can be used to close resources from parent processes + after fork (e.g. sockets/connections). + + """ + self._closed = True + dirty = self._dirty + resource = self._resource + while 1: # - acquired + try: + dres = dirty.pop() + except KeyError: + break + try: + self.collect_resource(dres) + except AttributeError: # Issue #78 + pass + while 1: # - available + # deque supports '.clear', but lists do not, so for that + # reason we use pop here, so that the underlying object can + # be any object supporting '.pop' and '.append'. + try: + res = resource.queue.pop() + except IndexError: + break + try: + self.collect_resource(res) + except AttributeError: + pass # Issue #78 + + if os.environ.get('KOMBU_DEBUG_POOL'): # pragma: no cover + _orig_acquire = acquire + _orig_release = release + + _next_resource_id = 0 + + def acquire(self, *args, **kwargs): # noqa + import traceback + id = self._next_resource_id = self._next_resource_id + 1 + print('+{0} ACQUIRE {1}'.format(id, self.__class__.__name__)) + r = self._orig_acquire(*args, **kwargs) + r._resource_id = id + print('-{0} ACQUIRE {1}'.format(id, self.__class__.__name__)) + if not hasattr(r, 'acquired_by'): + r.acquired_by = [] + r.acquired_by.append(traceback.format_stack()) + return r + + def release(self, resource): # noqa + id = resource._resource_id + print('+{0} RELEASE {1}'.format(id, self.__class__.__name__)) + r = self._orig_release(resource) + print('-{0} RELEASE {1}'.format(id, self.__class__.__name__)) + self._next_resource_id -= 1 + return r + + +class ConnectionPool(Resource): + LimitExceeded = exceptions.ConnectionLimitExceeded + + def __init__(self, connection, limit=None, preload=None): + self.connection = connection + super(ConnectionPool, self).__init__(limit=limit, + preload=preload) + + def new(self): + return self.connection.clone() + + def release_resource(self, resource): + try: + resource._debug('released') + except AttributeError: + pass + + def close_resource(self, resource): + resource._close() + + def collect_resource(self, resource, socket_timeout=0.1): + return resource.collect(socket_timeout) + + @contextmanager + def acquire_channel(self, block=False): + with self.acquire(block=block) as connection: + yield connection, connection.default_channel + + def setup(self): + if self.limit: + for i in range(self.limit): + if i < self.preload: + conn = self.new() + conn.connect() + else: + conn = lazy(self.new) + self._resource.put_nowait(conn) + + def prepare(self, resource): + if callable(resource): + resource = resource() + resource._debug('acquired') + return resource + + +class ChannelPool(Resource): + LimitExceeded = exceptions.ChannelLimitExceeded + + def __init__(self, connection, limit=None, preload=None): + self.connection = connection + super(ChannelPool, self).__init__(limit=limit, + preload=preload) + + def new(self): + return lazy(self.connection.channel) + + def setup(self): + channel = self.new() + if self.limit: + for i in range(self.limit): + self._resource.put_nowait( + i < self.preload and channel() or lazy(channel)) + + def 
prepare(self, channel): + if callable(channel): + channel = channel() + return channel + + +def maybe_channel(channel): + """Return the default channel if argument is a connection instance, + otherwise just return the channel given.""" + if isinstance(channel, Connection): + return channel.default_channel + return channel + + +def is_connection(obj): + return isinstance(obj, Connection) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/entity.py b/thesisenv/lib/python3.6/site-packages/kombu/entity.py new file mode 100644 index 0000000..066dc53 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/entity.py @@ -0,0 +1,748 @@ +""" +kombu.entity +================ + +Exchange and Queue declarations. + +""" +from __future__ import absolute_import + +from .abstract import MaybeChannelBound, Object +from .exceptions import ContentDisallowed +from .five import string_t +from .serialization import prepare_accept_content + +TRANSIENT_DELIVERY_MODE = 1 +PERSISTENT_DELIVERY_MODE = 2 +DELIVERY_MODES = {'transient': TRANSIENT_DELIVERY_MODE, + 'persistent': PERSISTENT_DELIVERY_MODE} + +__all__ = ['Exchange', 'Queue', 'binding'] + + +def _reprstr(s): + s = repr(s) + if isinstance(s, string_t) and s.startswith("u'"): + return s[2:-1] + return s[1:-1] + + +def pretty_bindings(bindings): + return '[%s]' % (', '.join(map(str, bindings))) + + +class Exchange(MaybeChannelBound): + """An Exchange declaration. + + :keyword name: See :attr:`name`. + :keyword type: See :attr:`type`. + :keyword channel: See :attr:`channel`. + :keyword durable: See :attr:`durable`. + :keyword auto_delete: See :attr:`auto_delete`. + :keyword delivery_mode: See :attr:`delivery_mode`. + :keyword arguments: See :attr:`arguments`. + + .. attribute:: name + + Name of the exchange. Default is no name (the default exchange). + + .. attribute:: type + + *This description of AMQP exchange types was shamelessly stolen + from the blog post `AMQP in 10 minutes: Part 4`_ by + Rajith Attapattu. Reading this article is recommended if you're + new to amqp.* + + "AMQP defines four default exchange types (routing algorithms) that + covers most of the common messaging use cases. An AMQP broker can + also define additional exchange types, so see your broker + manual for more information about available exchange types. + + * `direct` (*default*) + + Direct match between the routing key in the message, and the + routing criteria used when a queue is bound to this exchange. + + * `topic` + + Wildcard match between the routing key and the routing pattern + specified in the exchange/queue binding. The routing key is + treated as zero or more words delimited by `"."` and + supports special wildcard characters. `"*"` matches a + single word and `"#"` matches zero or more words. + + * `fanout` + + Queues are bound to this exchange with no arguments. Hence any + message sent to this exchange will be forwarded to all queues + bound to this exchange. + + * `headers` + + Queues are bound to this exchange with a table of arguments + containing headers and values (optional). A special argument + named "x-match" determines the matching algorithm, where + `"all"` implies an `AND` (all pairs must match) and + `"any"` implies `OR` (at least one pair must match). + + :attr:`arguments` is used to specify the arguments. + + + .. _`AMQP in 10 minutes: Part 4`: + http://bit.ly/amqp-exchange-types + + .. attribute:: channel + + The channel the exchange is bound to (if bound). + + .. attribute:: durable + + Durable exchanges remain active when a server restarts. 
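As a quick illustration of the exchange types listed in the docstring above, a short sketch (exchange and queue names are invented for the example, not part of the patch):

    from kombu import Connection, Exchange, Queue

    # direct: routing key must match the binding key exactly.
    logs = Exchange('logs', type='direct')
    # topic: dot-separated words, '*' matches one word, '#' matches any number.
    events = Exchange('events', type='topic')
    # fanout: every bound queue receives a copy of every message.
    broadcast = Exchange('broadcast', type='fanout')

    with Connection('amqp://') as conn:
        channel = conn.channel()
        logs(channel).declare()
        broadcast(channel).declare()
        # Bind a queue to the topic exchange with a wildcard pattern.
        queue = Queue('app.events', exchange=events,
                      routing_key='app.#', channel=channel)
        queue.declare()   # declares the exchange, the queue and the binding
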
Non-durable + exchanges (transient exchanges) are purged when a server restarts. + Default is :const:`True`. + + .. attribute:: auto_delete + + If set, the exchange is deleted when all queues have finished + using it. Default is :const:`False`. + + .. attribute:: delivery_mode + + The default delivery mode used for messages. The value is an integer, + or alias string. + + * 1 or `"transient"` + + The message is transient. Which means it is stored in + memory only, and is lost if the server dies or restarts. + + * 2 or "persistent" (*default*) + The message is persistent. Which means the message is + stored both in-memory, and on disk, and therefore + preserved if the server dies or restarts. + + The default value is 2 (persistent). + + .. attribute:: arguments + + Additional arguments to specify when the exchange is declared. + + """ + TRANSIENT_DELIVERY_MODE = TRANSIENT_DELIVERY_MODE + PERSISTENT_DELIVERY_MODE = PERSISTENT_DELIVERY_MODE + + name = '' + type = 'direct' + durable = True + auto_delete = False + passive = False + delivery_mode = PERSISTENT_DELIVERY_MODE + + attrs = ( + ('name', None), + ('type', None), + ('arguments', None), + ('durable', bool), + ('passive', bool), + ('auto_delete', bool), + ('delivery_mode', lambda m: DELIVERY_MODES.get(m) or m), + ) + + def __init__(self, name='', type='', channel=None, **kwargs): + super(Exchange, self).__init__(**kwargs) + self.name = name or self.name + self.type = type or self.type + self.maybe_bind(channel) + + def __hash__(self): + return hash('E|%s' % (self.name, )) + + def declare(self, nowait=False, passive=None): + """Declare the exchange. + + Creates the exchange on the broker. + + :keyword nowait: If set the server will not respond, and a + response will not be waited for. Default is :const:`False`. + + """ + passive = self.passive if passive is None else passive + if self.name: + return self.channel.exchange_declare( + exchange=self.name, type=self.type, durable=self.durable, + auto_delete=self.auto_delete, arguments=self.arguments, + nowait=nowait, passive=passive, + ) + + def bind_to(self, exchange='', routing_key='', + arguments=None, nowait=False, **kwargs): + """Binds the exchange to another exchange. + + :keyword nowait: If set the server will not respond, and the call + will not block waiting for a response. Default is :const:`False`. + + """ + if isinstance(exchange, Exchange): + exchange = exchange.name + return self.channel.exchange_bind(destination=self.name, + source=exchange, + routing_key=routing_key, + nowait=nowait, + arguments=arguments) + + def unbind_from(self, source='', routing_key='', + nowait=False, arguments=None): + """Delete previously created exchange binding from the server.""" + if isinstance(source, Exchange): + source = source.name + return self.channel.exchange_unbind(destination=self.name, + source=source, + routing_key=routing_key, + nowait=nowait, + arguments=arguments) + + def Message(self, body, delivery_mode=None, priority=None, + content_type=None, content_encoding=None, + properties=None, headers=None): + """Create message instance to be sent with :meth:`publish`. + + :param body: Message body. + + :keyword delivery_mode: Set custom delivery mode. Defaults + to :attr:`delivery_mode`. + + :keyword priority: Message priority, 0 to 9. (currently not + supported by RabbitMQ). + + :keyword content_type: The messages content_type. If content_type + is set, no serialization occurs as it is assumed this is either + a binary object, or you've done your own serialization. 
+ Leave blank if using built-in serialization as our library + properly sets content_type. + + :keyword content_encoding: The character set in which this object + is encoded. Use "binary" if sending in raw binary objects. + Leave blank if using built-in serialization as our library + properly sets content_encoding. + + :keyword properties: Message properties. + + :keyword headers: Message headers. + + """ + properties = {} if properties is None else properties + dm = delivery_mode or self.delivery_mode + properties['delivery_mode'] = \ + DELIVERY_MODES[dm] if (dm != 2 and dm != 1) else dm + return self.channel.prepare_message(body, + properties=properties, + priority=priority, + content_type=content_type, + content_encoding=content_encoding, + headers=headers) + + def publish(self, message, routing_key=None, mandatory=False, + immediate=False, exchange=None): + """Publish message. + + :param message: :meth:`Message` instance to publish. + :param routing_key: Routing key. + :param mandatory: Currently not supported. + :param immediate: Currently not supported. + + """ + exchange = exchange or self.name + return self.channel.basic_publish(message, + exchange=exchange, + routing_key=routing_key, + mandatory=mandatory, + immediate=immediate) + + def delete(self, if_unused=False, nowait=False): + """Delete the exchange declaration on server. + + :keyword if_unused: Delete only if the exchange has no bindings. + Default is :const:`False`. + + :keyword nowait: If set the server will not respond, and a + response will not be waited for. Default is :const:`False`. + + """ + return self.channel.exchange_delete(exchange=self.name, + if_unused=if_unused, + nowait=nowait) + + def binding(self, routing_key='', arguments=None, unbind_arguments=None): + return binding(self, routing_key, arguments, unbind_arguments) + + def __eq__(self, other): + if isinstance(other, Exchange): + return (self.name == other.name and + self.type == other.type and + self.arguments == other.arguments and + self.durable == other.durable and + self.auto_delete == other.auto_delete and + self.delivery_mode == other.delivery_mode) + return NotImplemented + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return super(Exchange, self).__repr__(str(self)) + + def __str__(self): + return 'Exchange %s(%s)' % (_reprstr(self.name) or repr(''), self.type) + + @property + def can_cache_declaration(self): + return not self.auto_delete + + +class binding(Object): + """Represents a queue or exchange binding. + + :keyword exchange: Exchange to bind to. + :keyword routing_key: Routing key used as binding key. + :keyword arguments: Arguments for bind operation. + :keyword unbind_arguments: Arguments for unbind operation. 
+ + """ + + attrs = ( + ('exchange', None), + ('routing_key', None), + ('arguments', None), + ('unbind_arguments', None) + ) + + def __init__(self, exchange=None, routing_key='', + arguments=None, unbind_arguments=None): + self.exchange = exchange + self.routing_key = routing_key + self.arguments = arguments + self.unbind_arguments = unbind_arguments + + def declare(self, channel, nowait=False): + """Declare destination exchange.""" + if self.exchange and self.exchange.name: + ex = self.exchange(channel) + ex.declare(nowait=nowait) + + def bind(self, entity, nowait=False): + """Bind entity to this binding.""" + entity.bind_to(exchange=self.exchange, + routing_key=self.routing_key, + arguments=self.arguments, + nowait=nowait) + + def unbind(self, entity, nowait=False): + """Unbind entity from this binding.""" + entity.unbind_from(self.exchange, + routing_key=self.routing_key, + arguments=self.unbind_arguments, + nowait=nowait) + + def __repr__(self): + return '' % (self, ) + + def __str__(self): + return '%s->%s' % ( + _reprstr(self.exchange.name), _reprstr(self.routing_key), + ) + + +class Queue(MaybeChannelBound): + """A Queue declaration. + + :keyword name: See :attr:`name`. + :keyword exchange: See :attr:`exchange`. + :keyword routing_key: See :attr:`routing_key`. + :keyword channel: See :attr:`channel`. + :keyword durable: See :attr:`durable`. + :keyword exclusive: See :attr:`exclusive`. + :keyword auto_delete: See :attr:`auto_delete`. + :keyword queue_arguments: See :attr:`queue_arguments`. + :keyword binding_arguments: See :attr:`binding_arguments`. + :keyword on_declared: See :attr:`on_declared` + + .. attribute:: name + + Name of the queue. Default is no name (default queue destination). + + .. attribute:: exchange + + The :class:`Exchange` the queue binds to. + + .. attribute:: routing_key + + The routing key (if any), also called *binding key*. + + The interpretation of the routing key depends on + the :attr:`Exchange.type`. + + * direct exchange + + Matches if the routing key property of the message and + the :attr:`routing_key` attribute are identical. + + * fanout exchange + + Always matches, even if the binding does not have a key. + + * topic exchange + + Matches the routing key property of the message by a primitive + pattern matching scheme. The message routing key then consists + of words separated by dots (`"."`, like domain names), and + two special characters are available; star (`"*"`) and hash + (`"#"`). The star matches any word, and the hash matches + zero or more words. For example `"*.stock.#"` matches the + routing keys `"usd.stock"` and `"eur.stock.db"` but not + `"stock.nasdaq"`. + + .. attribute:: channel + + The channel the Queue is bound to (if bound). + + .. attribute:: durable + + Durable queues remain active when a server restarts. + Non-durable queues (transient queues) are purged if/when + a server restarts. + Note that durable queues do not necessarily hold persistent + messages, although it does not make sense to send + persistent messages to a transient queue. + + Default is :const:`True`. + + .. attribute:: exclusive + + Exclusive queues may only be consumed from by the + current connection. Setting the 'exclusive' flag + always implies 'auto-delete'. + + Default is :const:`False`. + + .. attribute:: auto_delete + + If set, the queue is deleted when all consumers have + finished using it. Last consumer can be cancelled + either explicitly or because its channel is closed. If + there was no consumer ever on the queue, it won't be + deleted. + + .. 
attribute:: queue_arguments + + Additional arguments used when declaring the queue. + + .. attribute:: binding_arguments + + Additional arguments used when binding the queue. + + .. attribute:: alias + + Unused in Kombu, but applications can take advantage of this. + For example to give alternate names to queues with automatically + generated queue names. + + .. attribute:: on_declared + + Optional callback to be applied when the queue has been + declared (the ``queue_declare`` operation is complete). + This must be a function with a signature that accepts at least 3 + positional arguments: ``(name, messages, consumers)``. + + """ + ContentDisallowed = ContentDisallowed + + name = '' + exchange = Exchange('') + routing_key = '' + + durable = True + exclusive = False + auto_delete = False + no_ack = False + + attrs = ( + ('name', None), + ('exchange', None), + ('routing_key', None), + ('queue_arguments', None), + ('binding_arguments', None), + ('durable', bool), + ('exclusive', bool), + ('auto_delete', bool), + ('no_ack', None), + ('alias', None), + ('bindings', list), + ) + + def __init__(self, name='', exchange=None, routing_key='', + channel=None, bindings=None, on_declared=None, + **kwargs): + super(Queue, self).__init__(**kwargs) + self.name = name or self.name + self.exchange = exchange or self.exchange + self.routing_key = routing_key or self.routing_key + self.bindings = set(bindings or []) + self.on_declared = on_declared + + # allows Queue('name', [binding(...), binding(...), ...]) + if isinstance(exchange, (list, tuple, set)): + self.bindings |= set(exchange) + if self.bindings: + self.exchange = None + + # exclusive implies auto-delete. + if self.exclusive: + self.auto_delete = True + self.maybe_bind(channel) + + def bind(self, channel): + on_declared = self.on_declared + bound = super(Queue, self).bind(channel) + bound.on_declared = on_declared + return bound + + def __hash__(self): + return hash('Q|%s' % (self.name, )) + + def when_bound(self): + if self.exchange: + self.exchange = self.exchange(self.channel) + + def declare(self, nowait=False): + """Declares the queue, the exchange and binds the queue to + the exchange.""" + # - declare main binding. + if self.exchange: + self.exchange.declare(nowait) + self.queue_declare(nowait, passive=False) + + if self.exchange and self.exchange.name: + self.queue_bind(nowait) + + # - declare extra/multi-bindings. + for B in self.bindings: + B.declare(self.channel) + B.bind(self, nowait=nowait) + return self.name + + def queue_declare(self, nowait=False, passive=False): + """Declare queue on the server. + + :keyword nowait: Do not wait for a reply. + :keyword passive: If set, the server will not create the queue. + The client can use this to check whether a queue exists + without modifying the server state. 
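A brief sketch of the multi-binding form accepted by Queue's constructor above, using the binding helper defined earlier in this file; the exchange name and routing-key patterns are assumptions made for the example:

    from kombu import Connection, Exchange, Queue
    from kombu.entity import binding

    media = Exchange('media', type='topic')

    # One queue collecting messages that match either binding pattern.
    q = Queue('media.all', [
        binding(media, routing_key='image.#'),
        binding(media, routing_key='video.#'),
    ])

    with Connection('amqp://') as conn:
        bound = q(conn.default_channel)
        bound.declare()   # declares 'media', the queue and both bindings
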
+ + """ + ret = self.channel.queue_declare(queue=self.name, + passive=passive, + durable=self.durable, + exclusive=self.exclusive, + auto_delete=self.auto_delete, + arguments=self.queue_arguments, + nowait=nowait) + if not self.name: + self.name = ret[0] + if self.on_declared: + self.on_declared(*ret) + return ret + + def queue_bind(self, nowait=False): + """Create the queue binding on the server.""" + return self.bind_to(self.exchange, self.routing_key, + self.binding_arguments, nowait=nowait) + + def bind_to(self, exchange='', routing_key='', + arguments=None, nowait=False): + if isinstance(exchange, Exchange): + exchange = exchange.name + return self.channel.queue_bind(queue=self.name, + exchange=exchange, + routing_key=routing_key, + arguments=arguments, + nowait=nowait) + + def get(self, no_ack=None, accept=None): + """Poll the server for a new message. + + Must return the message if a message was available, + or :const:`None` otherwise. + + :keyword no_ack: If enabled the broker will automatically + ack messages. + :keyword accept: Custom list of accepted content types. + + This method provides direct access to the messages in a + queue using a synchronous dialogue, designed for + specific types of applications where synchronous functionality + is more important than performance. + + """ + no_ack = self.no_ack if no_ack is None else no_ack + message = self.channel.basic_get(queue=self.name, no_ack=no_ack) + if message is not None: + m2p = getattr(self.channel, 'message_to_python', None) + if m2p: + message = m2p(message) + if message.errors: + message._reraise_error() + message.accept = prepare_accept_content(accept) + return message + + def purge(self, nowait=False): + """Remove all ready messages from the queue.""" + return self.channel.queue_purge(queue=self.name, + nowait=nowait) or 0 + + def consume(self, consumer_tag='', callback=None, + no_ack=None, nowait=False): + """Start a queue consumer. + + Consumers last as long as the channel they were created on, or + until the client cancels them. + + :keyword consumer_tag: Unique identifier for the consumer. The + consumer tag is local to a connection, so two clients + can use the same consumer tags. If this field is empty + the server will generate a unique tag. + + :keyword no_ack: If enabled the broker will automatically ack + messages. + + :keyword nowait: Do not wait for a reply. + + :keyword callback: callback called for each delivered message + + """ + if no_ack is None: + no_ack = self.no_ack + return self.channel.basic_consume(queue=self.name, + no_ack=no_ack, + consumer_tag=consumer_tag or '', + callback=callback, + nowait=nowait) + + def cancel(self, consumer_tag): + """Cancel a consumer by consumer tag.""" + return self.channel.basic_cancel(consumer_tag) + + def delete(self, if_unused=False, if_empty=False, nowait=False): + """Delete the queue. + + :keyword if_unused: If set, the server will only delete the queue + if it has no consumers. A channel error will be raised + if the queue has consumers. + + :keyword if_empty: If set, the server will only delete the queue + if it is empty. If it is not empty a channel error will be raised. + + :keyword nowait: Do not wait for a reply. 
+ + """ + return self.channel.queue_delete(queue=self.name, + if_unused=if_unused, + if_empty=if_empty, + nowait=nowait) + + def queue_unbind(self, arguments=None, nowait=False): + return self.unbind_from(self.exchange, self.routing_key, + arguments, nowait) + + def unbind_from(self, exchange='', routing_key='', + arguments=None, nowait=False): + """Unbind queue by deleting the binding from the server.""" + return self.channel.queue_unbind(queue=self.name, + exchange=exchange.name, + routing_key=routing_key, + arguments=arguments, + nowait=nowait) + + def __eq__(self, other): + if isinstance(other, Queue): + return (self.name == other.name and + self.exchange == other.exchange and + self.routing_key == other.routing_key and + self.queue_arguments == other.queue_arguments and + self.binding_arguments == other.binding_arguments and + self.durable == other.durable and + self.exclusive == other.exclusive and + self.auto_delete == other.auto_delete) + return NotImplemented + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + s = super(Queue, self).__repr__ + if self.bindings: + return s('Queue {name} -> {bindings}'.format( + name=_reprstr(self.name), + bindings=pretty_bindings(self.bindings), + )) + return s( + 'Queue {name} -> {0.exchange!r} -> {routing_key}'.format( + self, name=_reprstr(self.name), + routing_key=_reprstr(self.routing_key), + ), + ) + + @property + def can_cache_declaration(self): + return not self.auto_delete + + @classmethod + def from_dict(self, queue, **options): + binding_key = options.get('binding_key') or options.get('routing_key') + + e_durable = options.get('exchange_durable') + if e_durable is None: + e_durable = options.get('durable') + + e_auto_delete = options.get('exchange_auto_delete') + if e_auto_delete is None: + e_auto_delete = options.get('auto_delete') + + q_durable = options.get('queue_durable') + if q_durable is None: + q_durable = options.get('durable') + + q_auto_delete = options.get('queue_auto_delete') + if q_auto_delete is None: + q_auto_delete = options.get('auto_delete') + + e_arguments = options.get('exchange_arguments') + q_arguments = options.get('queue_arguments') + b_arguments = options.get('binding_arguments') + bindings = options.get('bindings') + + exchange = Exchange(options.get('exchange'), + type=options.get('exchange_type'), + delivery_mode=options.get('delivery_mode'), + routing_key=options.get('routing_key'), + durable=e_durable, + auto_delete=e_auto_delete, + arguments=e_arguments) + return Queue(queue, + exchange=exchange, + routing_key=binding_key, + durable=q_durable, + exclusive=options.get('exclusive'), + auto_delete=q_auto_delete, + no_ack=options.get('no_ack'), + queue_arguments=q_arguments, + binding_arguments=b_arguments, + bindings=bindings) + + def as_dict(self, recurse=False): + res = super(Queue, self).as_dict(recurse) + if not recurse: + return res + bindings = res.get('bindings') + if bindings: + res['bindings'] = [b.as_dict(recurse=True) for b in bindings] + return res diff --git a/thesisenv/lib/python3.6/site-packages/kombu/exceptions.py b/thesisenv/lib/python3.6/site-packages/kombu/exceptions.py new file mode 100644 index 0000000..716bc69 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/exceptions.py @@ -0,0 +1,83 @@ +""" +kombu.exceptions +================ + +Exceptions. 
+ +""" +from __future__ import absolute_import + +import socket + +from amqp import ChannelError, ConnectionError, ResourceError + +__all__ = ['NotBoundError', 'MessageStateError', 'TimeoutError', + 'LimitExceeded', 'ConnectionLimitExceeded', + 'ChannelLimitExceeded', 'ConnectionError', 'ChannelError', + 'VersionMismatch', 'SerializerNotInstalled', 'ResourceError', + 'SerializationError', 'EncodeError', 'DecodeError'] + +TimeoutError = socket.timeout + + +class KombuError(Exception): + """Common subclass for all Kombu exceptions.""" + pass + + +class SerializationError(KombuError): + """Failed to serialize/deserialize content.""" + + +class EncodeError(SerializationError): + """Cannot encode object.""" + pass + + +class DecodeError(SerializationError): + """Cannot decode object.""" + + +class NotBoundError(KombuError): + """Trying to call channel dependent method on unbound entity.""" + pass + + +class MessageStateError(KombuError): + """The message has already been acknowledged.""" + pass + + +class LimitExceeded(KombuError): + """Limit exceeded.""" + pass + + +class ConnectionLimitExceeded(LimitExceeded): + """Maximum number of simultaneous connections exceeded.""" + pass + + +class ChannelLimitExceeded(LimitExceeded): + """Maximum number of simultaneous channels exceeded.""" + pass + + +class VersionMismatch(KombuError): + pass + + +class SerializerNotInstalled(KombuError): + """Support for the requested serialization type is not installed""" + pass + + +class ContentDisallowed(SerializerNotInstalled): + """Consumer does not allow this content-type.""" + pass + + +class InconsistencyError(ConnectionError): + """Data or environment has been found to be inconsistent, + depending on the cause it may be possible to retry the operation.""" + pass diff --git a/thesisenv/lib/python3.6/site-packages/kombu/five.py b/thesisenv/lib/python3.6/site-packages/kombu/five.py new file mode 100644 index 0000000..d36f252 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/five.py @@ -0,0 +1,206 @@ +# -*- coding: utf-8 -*- +""" + celery.five + ~~~~~~~~~~~ + + Compatibility implementations of features + only available in newer Python versions. 
+ + +""" +from __future__ import absolute_import + +# ############# py3k ######################################################### +import sys +PY3 = sys.version_info[0] == 3 + +try: + reload = reload # noqa +except NameError: # pragma: no cover + from imp import reload # noqa + +try: + from collections import UserList # noqa +except ImportError: # pragma: no cover + from UserList import UserList # noqa + +try: + from collections import UserDict # noqa +except ImportError: # pragma: no cover + from UserDict import UserDict # noqa + +try: + bytes_t = bytes +except NameError: # pragma: no cover + bytes_t = str # noqa + +# ############# time.monotonic ############################################### + +if sys.version_info < (3, 3): + + import platform + SYSTEM = platform.system() + + try: + import ctypes + except ImportError: # pragma: no cover + ctypes = None # noqa + + if SYSTEM == 'Darwin' and ctypes is not None: + from ctypes.util import find_library + libSystem = ctypes.CDLL(find_library('libSystem.dylib')) + CoreServices = ctypes.CDLL(find_library('CoreServices'), + use_errno=True) + mach_absolute_time = libSystem.mach_absolute_time + mach_absolute_time.restype = ctypes.c_uint64 + absolute_to_nanoseconds = CoreServices.AbsoluteToNanoseconds + absolute_to_nanoseconds.restype = ctypes.c_uint64 + absolute_to_nanoseconds.argtypes = [ctypes.c_uint64] + + def _monotonic(): + return absolute_to_nanoseconds(mach_absolute_time()) * 1e-9 + + elif SYSTEM == 'Linux' and ctypes is not None: + # from stackoverflow: + # questions/1205722/how-do-i-get-monotonic-time-durations-in-python + import os + + CLOCK_MONOTONIC = 1 # see + + class timespec(ctypes.Structure): + _fields_ = [ + ('tv_sec', ctypes.c_long), + ('tv_nsec', ctypes.c_long), + ] + + librt = ctypes.CDLL('librt.so.1', use_errno=True) + clock_gettime = librt.clock_gettime + clock_gettime.argtypes = [ + ctypes.c_int, ctypes.POINTER(timespec), + ] + + def _monotonic(): # noqa + t = timespec() + if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) != 0: + errno_ = ctypes.get_errno() + raise OSError(errno_, os.strerror(errno_)) + return t.tv_sec + t.tv_nsec * 1e-9 + else: + from time import time as _monotonic +try: + from time import monotonic +except ImportError: + monotonic = _monotonic # noqa + +# ############# Py3 <-> Py2 ################################################## + +if PY3: # pragma: no cover + import builtins + + from queue import Queue, Empty, Full, LifoQueue + from itertools import zip_longest + from io import StringIO, BytesIO + + map = map + zip = zip + string = str + string_t = str + long_t = int + text_t = str + range = range + module_name_t = str + + open_fqdn = 'builtins.open' + + def items(d): + return d.items() + + def keys(d): + return d.keys() + + def values(d): + return d.values() + + def nextfun(it): + return it.__next__ + + exec_ = getattr(builtins, 'exec') + + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + + class WhateverIO(StringIO): + + def write(self, data): + if isinstance(data, bytes): + data = data.encode() + StringIO.write(self, data) + +else: + import __builtin__ as builtins # noqa + from Queue import Queue, Empty, Full, LifoQueue # noqa + from itertools import ( # noqa + imap as map, + izip as zip, + izip_longest as zip_longest, + ) + try: + from cStringIO import StringIO # noqa + except ImportError: # pragma: no cover + from StringIO import StringIO # noqa + + string = unicode # noqa + string_t = basestring # noqa + text_t = unicode + 
long_t = long # noqa + range = xrange + module_name_t = str + + open_fqdn = '__builtin__.open' + + def items(d): # noqa + return d.iteritems() + + def keys(d): # noqa + return d.iterkeys() + + def values(d): # noqa + return d.itervalues() + + def nextfun(it): # noqa + return it.next + + def exec_(code, globs=None, locs=None): # pragma: no cover + """Execute code in a namespace.""" + if globs is None: + frame = sys._getframe(1) + globs = frame.f_globals + if locs is None: + locs = frame.f_locals + del frame + elif locs is None: + locs = globs + exec("""exec code in globs, locs""") + + exec_("""def reraise(tp, value, tb=None): raise tp, value, tb""") + + BytesIO = WhateverIO = StringIO # noqa + + +def with_metaclass(Type, skip_attrs=set(['__dict__', '__weakref__'])): + """Class decorator to set metaclass. + + Works with both Python 3 and Python 3 and it does not add + an extra class in the lookup order like ``six.with_metaclass`` does + (that is -- it copies the original class instead of using inheritance). + + """ + + def _clone_with_metaclass(Class): + attrs = dict((key, value) for key, value in items(vars(Class)) + if key not in skip_attrs) + return Type(Class.__name__, Class.__bases__, attrs) + + return _clone_with_metaclass diff --git a/thesisenv/lib/python3.6/site-packages/kombu/log.py b/thesisenv/lib/python3.6/site-packages/kombu/log.py new file mode 100644 index 0000000..2a7db9b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/log.py @@ -0,0 +1,147 @@ +from __future__ import absolute_import + +import logging +import numbers +import os +import sys + +from logging.handlers import WatchedFileHandler + +from .five import string_t +from .utils import cached_property +from .utils.encoding import safe_repr, safe_str +from .utils.functional import maybe_evaluate + +__all__ = ['LogMixin', 'LOG_LEVELS', 'get_loglevel', 'setup_logging'] + +try: + LOG_LEVELS = dict(logging._nameToLevel) + LOG_LEVELS.update(logging._levelToName) +except AttributeError: + LOG_LEVELS = dict(logging._levelNames) +LOG_LEVELS.setdefault('FATAL', logging.FATAL) +LOG_LEVELS.setdefault(logging.FATAL, 'FATAL') +DISABLE_TRACEBACKS = os.environ.get('DISABLE_TRACEBACKS') + + +class NullHandler(logging.Handler): + + def emit(self, record): + pass + + +def get_logger(logger): + if isinstance(logger, string_t): + logger = logging.getLogger(logger) + if not logger.handlers: + logger.addHandler(NullHandler()) + return logger + + +def get_loglevel(level): + if isinstance(level, string_t): + return LOG_LEVELS[level] + return level + + +def naive_format_parts(fmt): + parts = fmt.split('%') + for i, e in enumerate(parts[1:]): + yield None if not e or not parts[i - 1] else e[0] + + +def safeify_format(fmt, args, + filters={'s': safe_str, + 'r': safe_repr}): + for index, type in enumerate(naive_format_parts(fmt)): + filt = filters.get(type) + yield filt(args[index]) if filt else args[index] + + +class LogMixin(object): + + def debug(self, *args, **kwargs): + return self.log(logging.DEBUG, *args, **kwargs) + + def info(self, *args, **kwargs): + return self.log(logging.INFO, *args, **kwargs) + + def warn(self, *args, **kwargs): + return self.log(logging.WARN, *args, **kwargs) + + def error(self, *args, **kwargs): + return self._error(logging.ERROR, *args, **kwargs) + + def critical(self, *args, **kwargs): + return self._error(logging.CRITICAL, *args, **kwargs) + + def _error(self, severity, *args, **kwargs): + kwargs.setdefault('exc_info', True) + if DISABLE_TRACEBACKS: + kwargs.pop('exc_info', None) + return 
self.log(severity, *args, **kwargs) + + def annotate(self, text): + return '%s - %s' % (self.logger_name, text) + + def log(self, severity, *args, **kwargs): + if self.logger.isEnabledFor(severity): + log = self.logger.log + if len(args) > 1 and isinstance(args[0], string_t): + expand = [maybe_evaluate(arg) for arg in args[1:]] + return log(severity, + self.annotate(args[0].replace('%r', '%s')), + *list(safeify_format(args[0], expand)), **kwargs) + else: + return self.logger.log( + severity, self.annotate(' '.join(map(safe_str, args))), + **kwargs) + + def get_logger(self): + return get_logger(self.logger_name) + + def is_enabled_for(self, level): + return self.logger.isEnabledFor(self.get_loglevel(level)) + + def get_loglevel(self, level): + if not isinstance(level, numbers.Integral): + return LOG_LEVELS[level] + return level + + @cached_property + def logger(self): + return self.get_logger() + + @property + def logger_name(self): + return self.__class__.__name__ + + +class Log(LogMixin): + + def __init__(self, name, logger=None): + self._logger_name = name + self._logger = logger + + def get_logger(self): + if self._logger: + return self._logger + return LogMixin.get_logger(self) + + @property + def logger_name(self): + return self._logger_name + + +def setup_logging(loglevel=None, logfile=None): + logger = logging.getLogger() + loglevel = get_loglevel(loglevel or 'ERROR') + logfile = logfile if logfile else sys.__stderr__ + if not logger.handlers: + if hasattr(logfile, 'write'): + handler = logging.StreamHandler(logfile) + else: + handler = WatchedFileHandler(logfile) + logger.addHandler(handler) + logger.setLevel(loglevel) + return logger diff --git a/thesisenv/lib/python3.6/site-packages/kombu/message.py b/thesisenv/lib/python3.6/site-packages/kombu/message.py new file mode 100644 index 0000000..5f7ae52 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/message.py @@ -0,0 +1,154 @@ +""" +kombu.transport.message +======================= + +Message class. 
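For reference, a small sketch of the logging helpers defined in kombu/log.py above; the log file name and level are placeholders:

    from kombu.log import get_logger, setup_logging

    # Route kombu log output to a file at WARNING level.
    setup_logging(loglevel='WARNING', logfile='kombu.log')
    logger = get_logger(__name__)
    logger.warning('broker connection lost, retrying ...')
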
+ +""" +from __future__ import absolute_import + +import sys + +from .compression import decompress +from .exceptions import MessageStateError +from .five import reraise, text_t +from .serialization import loads + +ACK_STATES = frozenset(['ACK', 'REJECTED', 'REQUEUED']) + + +class Message(object): + """Base class for received messages.""" + __slots__ = ('_state', 'channel', 'delivery_tag', + 'content_type', 'content_encoding', + 'delivery_info', 'headers', 'properties', + 'body', '_decoded_cache', 'accept', '__dict__') + MessageStateError = MessageStateError + + errors = None + + def __init__(self, channel, body=None, delivery_tag=None, + content_type=None, content_encoding=None, delivery_info={}, + properties=None, headers=None, postencode=None, + accept=None, **kwargs): + self.errors = [] if self.errors is None else self.errors + self.channel = channel + self.delivery_tag = delivery_tag + self.content_type = content_type + self.content_encoding = content_encoding + self.delivery_info = delivery_info + self.headers = headers or {} + self.properties = properties or {} + self._decoded_cache = None + self._state = 'RECEIVED' + self.accept = accept + + compression = self.headers.get('compression') + if not self.errors and compression: + try: + body = decompress(body, compression) + except Exception: + self.errors.append(sys.exc_info()) + + if not self.errors and postencode and isinstance(body, text_t): + try: + body = body.encode(postencode) + except Exception: + self.errors.append(sys.exc_info()) + self.body = body + + def _reraise_error(self, callback=None): + try: + reraise(*self.errors[0]) + except Exception as exc: + if not callback: + raise + callback(self, exc) + + def ack(self): + """Acknowledge this message as being processed., + This will remove the message from the queue. + + :raises MessageStateError: If the message has already been + acknowledged/requeued/rejected. + + """ + if self.channel.no_ack_consumers is not None: + try: + consumer_tag = self.delivery_info['consumer_tag'] + except KeyError: + pass + else: + if consumer_tag in self.channel.no_ack_consumers: + return + if self.acknowledged: + raise self.MessageStateError( + 'Message already acknowledged with state: {0._state}'.format( + self)) + self.channel.basic_ack(self.delivery_tag) + self._state = 'ACK' + + def ack_log_error(self, logger, errors): + try: + self.ack() + except errors as exc: + logger.critical("Couldn't ack %r, reason:%r", + self.delivery_tag, exc, exc_info=True) + + def reject_log_error(self, logger, errors, requeue=False): + try: + self.reject(requeue=requeue) + except errors as exc: + logger.critical("Couldn't reject %r, reason: %r", + self.delivery_tag, exc, exc_info=True) + + def reject(self, requeue=False): + """Reject this message. + + The message will be discarded by the server. + + :raises MessageStateError: If the message has already been + acknowledged/requeued/rejected. + + """ + if self.acknowledged: + raise self.MessageStateError( + 'Message already acknowledged with state: {0._state}'.format( + self)) + self.channel.basic_reject(self.delivery_tag, requeue=requeue) + self._state = 'REJECTED' + + def requeue(self): + """Reject this message and put it back on the queue. + + You must not use this method as a means of selecting messages + to process. + + :raises MessageStateError: If the message has already been + acknowledged/requeued/rejected. 
+ + """ + if self.acknowledged: + raise self.MessageStateError( + 'Message already acknowledged with state: {0._state}'.format( + self)) + self.channel.basic_reject(self.delivery_tag, requeue=True) + self._state = 'REQUEUED' + + def decode(self): + """Deserialize the message body, returning the original + python structure sent by the publisher.""" + return loads(self.body, self.content_type, + self.content_encoding, accept=self.accept) + + @property + def acknowledged(self): + """Set to true if the message has been acknowledged.""" + return self._state in ACK_STATES + + @property + def payload(self): + """The decoded message body.""" + if not self._decoded_cache: + self._decoded_cache = self.decode() + return self._decoded_cache diff --git a/thesisenv/lib/python3.6/site-packages/kombu/messaging.py b/thesisenv/lib/python3.6/site-packages/kombu/messaging.py new file mode 100644 index 0000000..baa9ef9 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/messaging.py @@ -0,0 +1,608 @@ +""" +kombu.messaging +=============== + +Sending and receiving messages. + +""" +from __future__ import absolute_import + +import numbers + +from itertools import count + +from .common import maybe_declare +from .compression import compress +from .connection import maybe_channel, is_connection +from .entity import Exchange, Queue, DELIVERY_MODES +from .exceptions import ContentDisallowed +from .five import text_t, values +from .serialization import dumps, prepare_accept_content +from .utils import ChannelPromise, maybe_list + +__all__ = ['Exchange', 'Queue', 'Producer', 'Consumer'] + + +class Producer(object): + """Message Producer. + + :param channel: Connection or channel. + :keyword exchange: Optional default exchange. + :keyword routing_key: Optional default routing key. + :keyword serializer: Default serializer. Default is `"json"`. + :keyword compression: Default compression method. Default is no + compression. + :keyword auto_declare: Automatically declare the default exchange + at instantiation. Default is :const:`True`. + :keyword on_return: Callback to call for undeliverable messages, + when the `mandatory` or `immediate` arguments to + :meth:`publish` is used. This callback needs the following + signature: `(exception, exchange, routing_key, message)`. + Note that the producer needs to drain events to use this feature. + + """ + + #: Default exchange + exchange = None + + #: Default routing key. + routing_key = '' + + #: Default serializer to use. Default is JSON. + serializer = None + + #: Default compression method. Disabled by default. + compression = None + + #: By default the exchange is declared at instantiation. + #: If you want to declare manually then you can set this + #: to :const:`False`. + auto_declare = True + + #: Basic return callback. + on_return = None + + #: Set if channel argument was a Connection instance (using + #: default_channel). 
+    __connection__ = None
+
+    def __init__(self, channel, exchange=None, routing_key=None,
+                 serializer=None, auto_declare=None, compression=None,
+                 on_return=None):
+        self._channel = channel
+        self.exchange = exchange
+        self.routing_key = routing_key or self.routing_key
+        self.serializer = serializer or self.serializer
+        self.compression = compression or self.compression
+        self.on_return = on_return or self.on_return
+        self._channel_promise = None
+        if self.exchange is None:
+            self.exchange = Exchange('')
+        if auto_declare is not None:
+            self.auto_declare = auto_declare
+
+        if self._channel:
+            self.revive(self._channel)
+
+    def __repr__(self):
+        return '<Producer: {0._channel}>'.format(self)
+
+    def __reduce__(self):
+        return self.__class__, self.__reduce_args__()
+
+    def __reduce_args__(self):
+        return (None, self.exchange, self.routing_key, self.serializer,
+                self.auto_declare, self.compression)
+
+    def declare(self):
+        """Declare the exchange.
+
+        This happens automatically at instantiation if
+        :attr:`auto_declare` is enabled.
+
+        """
+        if self.exchange.name:
+            self.exchange.declare()
+
+    def maybe_declare(self, entity, retry=False, **retry_policy):
+        """Declare the exchange if it hasn't already been declared
+        during this session."""
+        if entity:
+            return maybe_declare(entity, self.channel, retry, **retry_policy)
+
+    def publish(self, body, routing_key=None, delivery_mode=None,
+                mandatory=False, immediate=False, priority=0,
+                content_type=None, content_encoding=None, serializer=None,
+                headers=None, compression=None, exchange=None, retry=False,
+                retry_policy=None, declare=[], expiration=None, **properties):
+        """Publish message to the specified exchange.
+
+        :param body: Message body.
+        :keyword routing_key: Message routing key.
+        :keyword delivery_mode: See :attr:`delivery_mode`.
+        :keyword mandatory: Currently not supported.
+        :keyword immediate: Currently not supported.
+        :keyword priority: Message priority. A number between 0 and 9.
+        :keyword content_type: Content type. Default is auto-detect.
+        :keyword content_encoding: Content encoding. Default is auto-detect.
+        :keyword serializer: Serializer to use. Default is auto-detect.
+        :keyword compression: Compression method to use. Default is none.
+        :keyword headers: Mapping of arbitrary headers to pass along
+            with the message body.
+        :keyword exchange: Override the exchange. Note that this exchange
+            must have been declared.
+        :keyword declare: Optional list of required entities that must
+            have been declared before publishing the message. The entities
+            will be declared using :func:`~kombu.common.maybe_declare`.
+        :keyword retry: Retry publishing, or declaring entities if the
+            connection is lost.
+        :keyword retry_policy: Retry configuration, this is the keywords
+            supported by :meth:`~kombu.Connection.ensure`.
+        :keyword expiration: A TTL in seconds can be specified per message.
+            Default is no expiration.
+        :keyword \*\*properties: Additional message properties, see AMQP spec.
+ + """ + headers = {} if headers is None else headers + retry_policy = {} if retry_policy is None else retry_policy + routing_key = self.routing_key if routing_key is None else routing_key + compression = self.compression if compression is None else compression + exchange = exchange or self.exchange + + if isinstance(exchange, Exchange): + delivery_mode = delivery_mode or exchange.delivery_mode + exchange = exchange.name + else: + delivery_mode = delivery_mode or self.exchange.delivery_mode + if not isinstance(delivery_mode, numbers.Integral): + delivery_mode = DELIVERY_MODES[delivery_mode] + properties['delivery_mode'] = delivery_mode + if expiration is not None: + properties['expiration'] = str(int(expiration*1000)) + + body, content_type, content_encoding = self._prepare( + body, serializer, content_type, content_encoding, + compression, headers) + + publish = self._publish + if retry: + publish = self.connection.ensure(self, publish, **retry_policy) + return publish(body, priority, content_type, + content_encoding, headers, properties, + routing_key, mandatory, immediate, exchange, declare) + + def _publish(self, body, priority, content_type, content_encoding, + headers, properties, routing_key, mandatory, + immediate, exchange, declare): + channel = self.channel + message = channel.prepare_message( + body, priority, content_type, + content_encoding, headers, properties, + ) + if declare: + maybe_declare = self.maybe_declare + [maybe_declare(entity) for entity in declare] + return channel.basic_publish( + message, + exchange=exchange, routing_key=routing_key, + mandatory=mandatory, immediate=immediate, + ) + + def _get_channel(self): + channel = self._channel + if isinstance(channel, ChannelPromise): + channel = self._channel = channel() + self.exchange.revive(channel) + if self.on_return: + channel.events['basic_return'].add(self.on_return) + return channel + + def _set_channel(self, channel): + self._channel = channel + channel = property(_get_channel, _set_channel) + + def revive(self, channel): + """Revive the producer after connection loss.""" + if is_connection(channel): + connection = channel + self.__connection__ = connection + channel = ChannelPromise(lambda: connection.default_channel) + if isinstance(channel, ChannelPromise): + self._channel = channel + self.exchange = self.exchange(channel) + else: + # Channel already concrete + self._channel = channel + if self.on_return: + self._channel.events['basic_return'].add(self.on_return) + self.exchange = self.exchange(channel) + if self.auto_declare: + # auto_decare is not recommended as this will force + # evaluation of the channel. + self.declare() + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.release() + + def release(self): + pass + close = release + + def _prepare(self, body, serializer=None, content_type=None, + content_encoding=None, compression=None, headers=None): + + # No content_type? Then we're serializing the data internally. + if not content_type: + serializer = serializer or self.serializer + (content_type, content_encoding, + body) = dumps(body, serializer=serializer) + else: + # If the programmer doesn't want us to serialize, + # make sure content_encoding is set. + if isinstance(body, text_t): + if not content_encoding: + content_encoding = 'utf-8' + body = body.encode(content_encoding) + + # If they passed in a string, we can't know anything + # about it. So assume it's binary data. 
+ elif not content_encoding: + content_encoding = 'binary' + + if compression: + body, headers['compression'] = compress(body, compression) + + return body, content_type, content_encoding + + @property + def connection(self): + try: + return self.__connection__ or self.channel.connection.client + except AttributeError: + pass + + +class Consumer(object): + """Message consumer. + + :param channel: see :attr:`channel`. + :param queues: see :attr:`queues`. + :keyword no_ack: see :attr:`no_ack`. + :keyword auto_declare: see :attr:`auto_declare` + :keyword callbacks: see :attr:`callbacks`. + :keyword on_message: See :attr:`on_message` + :keyword on_decode_error: see :attr:`on_decode_error`. + + """ + ContentDisallowed = ContentDisallowed + + #: The connection/channel to use for this consumer. + channel = None + + #: A single :class:`~kombu.Queue`, or a list of queues to + #: consume from. + queues = None + + #: Flag for automatic message acknowledgment. + #: If enabled the messages are automatically acknowledged by the + #: broker. This can increase performance but means that you + #: have no control of when the message is removed. + #: + #: Disabled by default. + no_ack = None + + #: By default all entities will be declared at instantiation, if you + #: want to handle this manually you can set this to :const:`False`. + auto_declare = True + + #: List of callbacks called in order when a message is received. + #: + #: The signature of the callbacks must take two arguments: + #: `(body, message)`, which is the decoded message body and + #: the `Message` instance (a subclass of + #: :class:`~kombu.transport.base.Message`). + callbacks = None + + #: Optional function called whenever a message is received. + #: + #: When defined this function will be called instead of the + #: :meth:`receive` method, and :attr:`callbacks` will be disabled. + #: + #: So this can be used as an alternative to :attr:`callbacks` when + #: you don't want the body to be automatically decoded. + #: Note that the message will still be decompressed if the message + #: has the ``compression`` header set. + #: + #: The signature of the callback must take a single argument, + #: which is the raw message object (a subclass of + #: :class:`~kombu.transport.base.Message`). + #: + #: Also note that the ``message.body`` attribute, which is the raw + #: contents of the message body, may in some cases be a read-only + #: :class:`buffer` object. + on_message = None + + #: Callback called when a message can't be decoded. + #: + #: The signature of the callback must take two arguments: `(message, + #: exc)`, which is the message that can't be decoded and the exception + #: that occurred while trying to decode it. + on_decode_error = None + + #: List of accepted content-types. + #: + #: An exception will be raised if the consumer receives + #: a message with an untrusted content type. + #: By default all content-types are accepted, but not if + #: :func:`kombu.disable_untrusted_serializers` was called, + #: in which case only json is allowed. 
+ accept = None + + _tags = count(1) # global + + def __init__(self, channel, queues=None, no_ack=None, auto_declare=None, + callbacks=None, on_decode_error=None, on_message=None, + accept=None, tag_prefix=None): + self.channel = channel + self.queues = self.queues or [] if queues is None else queues + self.no_ack = self.no_ack if no_ack is None else no_ack + self.callbacks = (self.callbacks or [] if callbacks is None + else callbacks) + self.on_message = on_message + self.tag_prefix = tag_prefix + self._active_tags = {} + if auto_declare is not None: + self.auto_declare = auto_declare + if on_decode_error is not None: + self.on_decode_error = on_decode_error + self.accept = prepare_accept_content(accept) + + if self.channel: + self.revive(self.channel) + + def revive(self, channel): + """Revive consumer after connection loss.""" + self._active_tags.clear() + channel = self.channel = maybe_channel(channel) + self.queues = [queue(self.channel) + for queue in maybe_list(self.queues)] + for queue in self.queues: + queue.revive(channel) + + if self.auto_declare: + self.declare() + + def declare(self): + """Declare queues, exchanges and bindings. + + This is done automatically at instantiation if :attr:`auto_declare` + is set. + + """ + for queue in self.queues: + queue.declare() + + def register_callback(self, callback): + """Register a new callback to be called when a message + is received. + + The signature of the callback needs to accept two arguments: + `(body, message)`, which is the decoded message body + and the `Message` instance (a subclass of + :class:`~kombu.transport.base.Message`. + + """ + self.callbacks.append(callback) + + def __enter__(self): + self.consume() + return self + + def __exit__(self, *exc_info): + try: + self.cancel() + except Exception: + pass + + def add_queue(self, queue): + """Add a queue to the list of queues to consume from. + + This will not start consuming from the queue, + for that you will have to call :meth:`consume` after. + + """ + queue = queue(self.channel) + if self.auto_declare: + queue.declare() + self.queues.append(queue) + return queue + + def add_queue_from_dict(self, queue, **options): + """This method is deprecated. + + Instead please use:: + + consumer.add_queue(Queue.from_dict(d)) + + """ + return self.add_queue(Queue.from_dict(queue, **options)) + + def consume(self, no_ack=None): + """Start consuming messages. + + Can be called multiple times, but note that while it + will consume from new queues added since the last call, + it will not cancel consuming from removed queues ( + use :meth:`cancel_by_queue`). + + :param no_ack: See :attr:`no_ack`. + + """ + if self.queues: + no_ack = self.no_ack if no_ack is None else no_ack + + H, T = self.queues[:-1], self.queues[-1] + for queue in H: + self._basic_consume(queue, no_ack=no_ack, nowait=True) + self._basic_consume(T, no_ack=no_ack, nowait=False) + + def cancel(self): + """End all active queue consumers. + + This does not affect already delivered messages, but it does + mean the server will not send any more messages for this consumer. 
+ + """ + cancel = self.channel.basic_cancel + for tag in values(self._active_tags): + cancel(tag) + self._active_tags.clear() + close = cancel + + def cancel_by_queue(self, queue): + """Cancel consumer by queue name.""" + try: + tag = self._active_tags.pop(queue) + except KeyError: + pass + else: + self.queues[:] = [q for q in self.queues if q.name != queue] + self.channel.basic_cancel(tag) + + def consuming_from(self, queue): + """Return :const:`True` if the consumer is currently + consuming from queue'.""" + name = queue + if isinstance(queue, Queue): + name = queue.name + return name in self._active_tags + + def purge(self): + """Purge messages from all queues. + + .. warning:: + This will *delete all ready messages*, there is no + undo operation. + + """ + return sum(queue.purge() for queue in self.queues) + + def flow(self, active): + """Enable/disable flow from peer. + + This is a simple flow-control mechanism that a peer can use + to avoid overflowing its queues or otherwise finding itself + receiving more messages than it can process. + + The peer that receives a request to stop sending content + will finish sending the current content (if any), and then wait + until flow is reactivated. + + """ + self.channel.flow(active) + + def qos(self, prefetch_size=0, prefetch_count=0, apply_global=False): + """Specify quality of service. + + The client can request that messages should be sent in + advance so that when the client finishes processing a message, + the following message is already held locally, rather than needing + to be sent down the channel. Prefetching gives a performance + improvement. + + The prefetch window is Ignored if the :attr:`no_ack` option is set. + + :param prefetch_size: Specify the prefetch window in octets. + The server will send a message in advance if it is equal to + or smaller in size than the available prefetch size (and + also falls within other prefetch limits). May be set to zero, + meaning "no specific limit", although other prefetch limits + may still apply. + + :param prefetch_count: Specify the prefetch window in terms of + whole messages. + + :param apply_global: Apply new settings globally on all channels. + + """ + return self.channel.basic_qos(prefetch_size, + prefetch_count, + apply_global) + + def recover(self, requeue=False): + """Redeliver unacknowledged messages. + + Asks the broker to redeliver all unacknowledged messages + on the specified channel. + + :keyword requeue: By default the messages will be redelivered + to the original recipient. With `requeue` set to true, the + server will attempt to requeue the message, potentially then + delivering it to an alternative subscriber. + + """ + return self.channel.basic_recover(requeue=requeue) + + def receive(self, body, message): + """Method called when a message is received. + + This dispatches to the registered :attr:`callbacks`. + + :param body: The decoded message body. + :param message: The `Message` instance. + + :raises NotImplementedError: If no consumer callbacks have been + registered. 
+
+        """
+        callbacks = self.callbacks
+        if not callbacks:
+            raise NotImplementedError('Consumer does not have any callbacks')
+        [callback(body, message) for callback in callbacks]
+
+    def _basic_consume(self, queue, consumer_tag=None,
+                       no_ack=no_ack, nowait=True):
+        tag = self._active_tags.get(queue.name)
+        if tag is None:
+            tag = self._add_tag(queue, consumer_tag)
+            queue.consume(tag, self._receive_callback,
+                          no_ack=no_ack, nowait=nowait)
+        return tag
+
+    def _add_tag(self, queue, consumer_tag=None):
+        tag = consumer_tag or '{0}{1}'.format(
+            self.tag_prefix, next(self._tags))
+        self._active_tags[queue.name] = tag
+        return tag
+
+    def _receive_callback(self, message):
+        accept = self.accept
+        on_m, channel, decoded = self.on_message, self.channel, None
+        try:
+            m2p = getattr(channel, 'message_to_python', None)
+            if m2p:
+                message = m2p(message)
+            if accept is not None:
+                message.accept = accept
+            if message.errors:
+                return message._reraise_error(self.on_decode_error)
+            decoded = None if on_m else message.decode()
+        except Exception as exc:
+            if not self.on_decode_error:
+                raise
+            self.on_decode_error(message, exc)
+        else:
+            return on_m(message) if on_m else self.receive(decoded, message)
+
+    def __repr__(self):
+        return '<Consumer: {0.queues}>'.format(self)
+
+    @property
+    def connection(self):
+        try:
+            return self.channel.connection.client
+        except AttributeError:
+            pass
diff --git a/thesisenv/lib/python3.6/site-packages/kombu/mixins.py b/thesisenv/lib/python3.6/site-packages/kombu/mixins.py
new file mode 100644
index 0000000..f91cf65
--- /dev/null
+++ b/thesisenv/lib/python3.6/site-packages/kombu/mixins.py
@@ -0,0 +1,257 @@
+# -*- coding: utf-8 -*-
+"""
+kombu.mixins
+============
+
+Useful mixin classes.
+
+"""
+from __future__ import absolute_import
+
+import socket
+
+from contextlib import contextmanager
+from functools import partial
+from itertools import count
+from time import sleep
+
+from .common import ignore_errors
+from .five import range
+from .messaging import Consumer
+from .log import get_logger
+from .utils import cached_property, nested
+from .utils.encoding import safe_repr
+from .utils.limits import TokenBucket
+
+__all__ = ['ConsumerMixin']
+
+logger = get_logger(__name__)
+debug, info, warn, error = logger.debug, logger.info, logger.warn, logger.error
+
+W_CONN_LOST = """\
+Connection to broker lost, trying to re-establish connection...\
+"""
+
+W_CONN_ERROR = """\
+Broker connection error, trying again in %s seconds: %r.\
+"""
+
+
+class ConsumerMixin(object):
+    """Convenience mixin for implementing consumer programs.
+
+    It can be used outside of threads, with threads, or greenthreads
+    (eventlet/gevent) too.
+
+    The basic class would need a :attr:`connection` attribute
+    which must be a :class:`~kombu.Connection` instance,
+    and define a :meth:`get_consumers` method that returns a list
+    of :class:`kombu.Consumer` instances to use.
+    Supporting multiple consumers is important so that multiple
+    channels can be used for different QoS requirements.
+
+    **Example**:
+
+    .. code-block:: python
+
+        class Worker(ConsumerMixin):
+            task_queue = Queue('tasks', Exchange('tasks'), 'tasks')
+
+            def __init__(self, connection):
+                self.connection = connection
+
+            def get_consumers(self, Consumer, channel):
+                return [Consumer(queues=[self.task_queue],
+                                 callbacks=[self.on_task])]
+
+            def on_task(self, body, message):
+                print('Got task: {0!r}'.format(body))
+                message.ack()
+
+    **Additional handler methods**:
+
+    * :meth:`extra_context`
+
+        Optional extra context manager that will be entered
+        after the connection and consumers have been set up.
+
+        Takes arguments ``(connection, channel)``.
+
+    * :meth:`on_connection_error`
+
+        Handler called if the connection is lost or
+        is unavailable.
+
+        Takes arguments ``(exc, interval)``, where interval
+        is the time in seconds when the connection will be retried.
+
+        The default handler will log the exception.
+
+    * :meth:`on_connection_revived`
+
+        Handler called as soon as the connection is re-established
+        after connection failure.
+
+        Takes no arguments.
+
+    * :meth:`on_consume_ready`
+
+        Handler called when the consumer is ready to accept
+        messages.
+
+        Takes arguments ``(connection, channel, consumers)``.
+        Also keyword arguments to ``consume`` are forwarded
+        to this handler.
+
+    * :meth:`on_consume_end`
+
+        Handler called after the consumers are cancelled.
+        Takes arguments ``(connection, channel)``.
+
+    * :meth:`on_iteration`
+
+        Handler called for every iteration while draining
+        events.
+
+        Takes no arguments.
+
+    * :meth:`on_decode_error`
+
+        Handler called if a consumer was unable to decode
+        the body of a message.
+
+        Takes arguments ``(message, exc)`` where message is the
+        original message object.
+
+        The default handler will log the error and
+        acknowledge the message, so if you override make
+        sure to call super, or perform these steps yourself.
+
+    """
+
+    #: maximum number of retries trying to re-establish the connection,
+    #: if the connection is lost/unavailable.
+    connect_max_retries = None
+
+    #: When this is set to true the consumer should stop consuming
+    #: and return, so that it can be joined if it is the implementation
+    #: of a thread.
+ should_stop = False + + def get_consumers(self, Consumer, channel): + raise NotImplementedError('Subclass responsibility') + + def on_connection_revived(self): + pass + + def on_consume_ready(self, connection, channel, consumers, **kwargs): + pass + + def on_consume_end(self, connection, channel): + pass + + def on_iteration(self): + pass + + def on_decode_error(self, message, exc): + error("Can't decode message body: %r (type:%r encoding:%r raw:%r')", + exc, message.content_type, message.content_encoding, + safe_repr(message.body)) + message.ack() + + def on_connection_error(self, exc, interval): + warn(W_CONN_ERROR, interval, exc, exc_info=1) + + @contextmanager + def extra_context(self, connection, channel): + yield + + def run(self, _tokens=1): + restart_limit = self.restart_limit + errors = (self.connection.connection_errors + + self.connection.channel_errors) + while not self.should_stop: + try: + if restart_limit.can_consume(_tokens): + for _ in self.consume(limit=None): # pragma: no cover + pass + else: + sleep(restart_limit.expected_time(_tokens)) + except errors: + warn(W_CONN_LOST, exc_info=1) + + @contextmanager + def consumer_context(self, **kwargs): + with self.Consumer() as (connection, channel, consumers): + with self.extra_context(connection, channel): + self.on_consume_ready(connection, channel, consumers, **kwargs) + yield connection, channel, consumers + + def consume(self, limit=None, timeout=None, safety_interval=1, **kwargs): + elapsed = 0 + with self.consumer_context(**kwargs) as (conn, channel, consumers): + for i in limit and range(limit) or count(): + if self.should_stop: + break + self.on_iteration() + try: + conn.drain_events(timeout=safety_interval) + except socket.timeout: + conn.heartbeat_check() + elapsed += safety_interval + if timeout and elapsed >= timeout: + raise + except socket.error: + if not self.should_stop: + raise + else: + yield + elapsed = 0 + debug('consume exiting') + + def maybe_conn_error(self, fun): + """Use :func:`kombu.common.ignore_errors` instead.""" + return ignore_errors(self, fun) + + def create_connection(self): + return self.connection.clone() + + @contextmanager + def establish_connection(self): + with self.create_connection() as conn: + conn.ensure_connection(self.on_connection_error, + self.connect_max_retries) + yield conn + + @contextmanager + def Consumer(self): + with self.establish_connection() as conn: + self.on_connection_revived() + info('Connected to %s', conn.as_uri()) + channel = conn.default_channel + cls = partial(Consumer, channel, + on_decode_error=self.on_decode_error) + with self._consume_from(*self.get_consumers(cls, channel)) as c: + yield conn, channel, c + debug('Consumers cancelled') + self.on_consume_end(conn, channel) + debug('Connection closed') + + def _consume_from(self, *consumers): + return nested(*consumers) + + @cached_property + def restart_limit(self): + # the AttributeError that can be catched from amqplib + # poses problems for the too often restarts protection + # in Connection.ensure_connection + return TokenBucket(1) + + @cached_property + def connection_errors(self): + return self.connection.connection_errors + + @cached_property + def channel_errors(self): + return self.connection.channel_errors diff --git a/thesisenv/lib/python3.6/site-packages/kombu/pidbox.py b/thesisenv/lib/python3.6/site-packages/kombu/pidbox.py new file mode 100644 index 0000000..3a31311 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/pidbox.py @@ -0,0 +1,364 @@ +""" +kombu.pidbox +=============== + 
+Generic process mailbox. + +""" +from __future__ import absolute_import + +import socket +import warnings + +from collections import defaultdict, deque +from copy import copy +from itertools import count +from threading import local +from time import time + +from . import Exchange, Queue, Consumer, Producer +from .clocks import LamportClock +from .common import maybe_declare, oid_from +from .exceptions import InconsistencyError +from .five import range +from .log import get_logger +from .utils import cached_property, kwdict, uuid, reprcall + +REPLY_QUEUE_EXPIRES = 10 + +W_PIDBOX_IN_USE = """\ +A node named {node.hostname} is already using this process mailbox! + +Maybe you forgot to shutdown the other node or did not do so properly? +Or if you meant to start multiple nodes on the same host please make sure +you give each node a unique node name! +""" + +__all__ = ['Node', 'Mailbox'] +logger = get_logger(__name__) +debug, error = logger.debug, logger.error + + +class Node(object): + + #: hostname of the node. + hostname = None + + #: the :class:`Mailbox` this is a node for. + mailbox = None + + #: map of method name/handlers. + handlers = None + + #: current context (passed on to handlers) + state = None + + #: current channel. + channel = None + + def __init__(self, hostname, state=None, channel=None, + handlers=None, mailbox=None): + self.channel = channel + self.mailbox = mailbox + self.hostname = hostname + self.state = state + self.adjust_clock = self.mailbox.clock.adjust + if handlers is None: + handlers = {} + self.handlers = handlers + + def Consumer(self, channel=None, no_ack=True, accept=None, **options): + queue = self.mailbox.get_queue(self.hostname) + + def verify_exclusive(name, messages, consumers): + if consumers: + warnings.warn(W_PIDBOX_IN_USE.format(node=self)) + queue.on_declared = verify_exclusive + + return Consumer( + channel or self.channel, [queue], no_ack=no_ack, + accept=self.mailbox.accept if accept is None else accept, + **options + ) + + def handler(self, fun): + self.handlers[fun.__name__] = fun + return fun + + def on_decode_error(self, message, exc): + error('Cannot decode message: %r', exc, exc_info=1) + + def listen(self, channel=None, callback=None): + consumer = self.Consumer(channel=channel, + callbacks=[callback or self.handle_message], + on_decode_error=self.on_decode_error) + consumer.consume() + return consumer + + def dispatch(self, method, arguments=None, + reply_to=None, ticket=None, **kwargs): + arguments = arguments or {} + debug('pidbox received method %s [reply_to:%s ticket:%s]', + reprcall(method, (), kwargs=arguments), reply_to, ticket) + handle = reply_to and self.handle_call or self.handle_cast + try: + reply = handle(method, kwdict(arguments)) + except SystemExit: + raise + except Exception as exc: + error('pidbox command error: %r', exc, exc_info=1) + reply = {'error': repr(exc)} + + if reply_to: + self.reply({self.hostname: reply}, + exchange=reply_to['exchange'], + routing_key=reply_to['routing_key'], + ticket=ticket) + return reply + + def handle(self, method, arguments={}): + return self.handlers[method](self.state, **arguments) + + def handle_call(self, method, arguments): + return self.handle(method, arguments) + + def handle_cast(self, method, arguments): + return self.handle(method, arguments) + + def handle_message(self, body, message=None): + destination = body.get('destination') + if message: + self.adjust_clock(message.headers.get('clock') or 0) + if not destination or self.hostname in destination: + return 
self.dispatch(**kwdict(body)) + dispatch_from_message = handle_message + + def reply(self, data, exchange, routing_key, ticket, **kwargs): + self.mailbox._publish_reply(data, exchange, routing_key, ticket, + channel=self.channel, + serializer=self.mailbox.serializer) + + +class Mailbox(object): + node_cls = Node + exchange_fmt = '%s.pidbox' + reply_exchange_fmt = 'reply.%s.pidbox' + + #: Name of application. + namespace = None + + #: Connection (if bound). + connection = None + + #: Exchange type (usually direct, or fanout for broadcast). + type = 'direct' + + #: mailbox exchange (init by constructor). + exchange = None + + #: exchange to send replies to. + reply_exchange = None + + #: Only accepts json messages by default. + accept = ['json'] + + #: Message serializer + serializer = None + + def __init__(self, namespace, + type='direct', connection=None, clock=None, + accept=None, serializer=None): + self.namespace = namespace + self.connection = connection + self.type = type + self.clock = LamportClock() if clock is None else clock + self.exchange = self._get_exchange(self.namespace, self.type) + self.reply_exchange = self._get_reply_exchange(self.namespace) + self._tls = local() + self.unclaimed = defaultdict(deque) + self.accept = self.accept if accept is None else accept + self.serializer = self.serializer if serializer is None else serializer + + def __call__(self, connection): + bound = copy(self) + bound.connection = connection + return bound + + def Node(self, hostname=None, state=None, channel=None, handlers=None): + hostname = hostname or socket.gethostname() + return self.node_cls(hostname, state, channel, handlers, mailbox=self) + + def call(self, destination, command, kwargs={}, + timeout=None, callback=None, channel=None): + return self._broadcast(command, kwargs, destination, + reply=True, timeout=timeout, + callback=callback, + channel=channel) + + def cast(self, destination, command, kwargs={}): + return self._broadcast(command, kwargs, destination, reply=False) + + def abcast(self, command, kwargs={}): + return self._broadcast(command, kwargs, reply=False) + + def multi_call(self, command, kwargs={}, timeout=1, + limit=None, callback=None, channel=None): + return self._broadcast(command, kwargs, reply=True, + timeout=timeout, limit=limit, + callback=callback, + channel=channel) + + def get_reply_queue(self): + oid = self.oid + return Queue( + '%s.%s' % (oid, self.reply_exchange.name), + exchange=self.reply_exchange, + routing_key=oid, + durable=False, + auto_delete=True, + queue_arguments={'x-expires': int(REPLY_QUEUE_EXPIRES * 1000)}, + ) + + @cached_property + def reply_queue(self): + return self.get_reply_queue() + + def get_queue(self, hostname): + return Queue('%s.%s.pidbox' % (hostname, self.namespace), + exchange=self.exchange, + durable=False, + auto_delete=True) + + def _publish_reply(self, reply, exchange, routing_key, ticket, + channel=None, **opts): + chan = channel or self.connection.default_channel + exchange = Exchange(exchange, exchange_type='direct', + delivery_mode='transient', + durable=False) + producer = Producer(chan, auto_declare=False) + try: + producer.publish( + reply, exchange=exchange, routing_key=routing_key, + declare=[exchange], headers={ + 'ticket': ticket, 'clock': self.clock.forward(), + }, + **opts + ) + except InconsistencyError: + pass # queue probably deleted and no one is expecting a reply. 
+ + def _publish(self, type, arguments, destination=None, + reply_ticket=None, channel=None, timeout=None, + serializer=None): + message = {'method': type, + 'arguments': arguments, + 'destination': destination} + chan = channel or self.connection.default_channel + exchange = self.exchange + if reply_ticket: + maybe_declare(self.reply_queue(channel)) + message.update(ticket=reply_ticket, + reply_to={'exchange': self.reply_exchange.name, + 'routing_key': self.oid}) + serializer = serializer or self.serializer + producer = Producer(chan, auto_declare=False) + producer.publish( + message, exchange=exchange.name, declare=[exchange], + headers={'clock': self.clock.forward(), + 'expires': time() + timeout if timeout else 0}, + serializer=serializer, + ) + + def _broadcast(self, command, arguments=None, destination=None, + reply=False, timeout=1, limit=None, + callback=None, channel=None, serializer=None): + if destination is not None and \ + not isinstance(destination, (list, tuple)): + raise ValueError( + 'destination must be a list/tuple not {0}'.format( + type(destination))) + + arguments = arguments or {} + reply_ticket = reply and uuid() or None + chan = channel or self.connection.default_channel + + # Set reply limit to number of destinations (if specified) + if limit is None and destination: + limit = destination and len(destination) or None + + serializer = serializer or self.serializer + self._publish(command, arguments, destination=destination, + reply_ticket=reply_ticket, + channel=chan, + timeout=timeout, + serializer=serializer) + + if reply_ticket: + return self._collect(reply_ticket, limit=limit, + timeout=timeout, + callback=callback, + channel=chan) + + def _collect(self, ticket, + limit=None, timeout=1, callback=None, + channel=None, accept=None): + if accept is None: + accept = self.accept + chan = channel or self.connection.default_channel + queue = self.reply_queue + consumer = Consumer(channel, [queue], accept=accept, no_ack=True) + responses = [] + unclaimed = self.unclaimed + adjust_clock = self.clock.adjust + + try: + return unclaimed.pop(ticket) + except KeyError: + pass + + def on_message(body, message): + # ticket header added in kombu 2.5 + header = message.headers.get + adjust_clock(header('clock') or 0) + expires = header('expires') + if expires and time() > expires: + return + this_id = header('ticket', ticket) + if this_id == ticket: + if callback: + callback(body) + responses.append(body) + else: + unclaimed[this_id].append(body) + + consumer.register_callback(on_message) + try: + with consumer: + for i in limit and range(limit) or count(): + try: + self.connection.drain_events(timeout=timeout) + except socket.timeout: + break + return responses + finally: + chan.after_reply_message_received(queue.name) + + def _get_exchange(self, namespace, type): + return Exchange(self.exchange_fmt % namespace, + type=type, + durable=False, + delivery_mode='transient') + + def _get_reply_exchange(self, namespace): + return Exchange(self.reply_exchange_fmt % namespace, + type='direct', + durable=False, + delivery_mode='transient') + + @cached_property + def oid(self): + try: + return self._tls.OID + except AttributeError: + oid = self._tls.OID = oid_from(self) + return oid diff --git a/thesisenv/lib/python3.6/site-packages/kombu/pools.py b/thesisenv/lib/python3.6/site-packages/kombu/pools.py new file mode 100644 index 0000000..4d075e6 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/pools.py @@ -0,0 +1,153 @@ +""" +kombu.pools +=========== + +Public resource 
pools. + +""" +from __future__ import absolute_import + +import os + +from itertools import chain + +from .connection import Resource +from .five import range, values +from .messaging import Producer +from .utils import EqualityDict +from .utils.functional import lazy + +__all__ = ['ProducerPool', 'PoolGroup', 'register_group', + 'connections', 'producers', 'get_limit', 'set_limit', 'reset'] +_limit = [200] +_used = [False] +_groups = [] +use_global_limit = object() +disable_limit_protection = os.environ.get('KOMBU_DISABLE_LIMIT_PROTECTION') + + +class ProducerPool(Resource): + Producer = Producer + + def __init__(self, connections, *args, **kwargs): + self.connections = connections + self.Producer = kwargs.pop('Producer', None) or self.Producer + super(ProducerPool, self).__init__(*args, **kwargs) + + def _acquire_connection(self): + return self.connections.acquire(block=True) + + def create_producer(self): + conn = self._acquire_connection() + try: + return self.Producer(conn) + except BaseException: + conn.release() + raise + + def new(self): + return lazy(self.create_producer) + + def setup(self): + if self.limit: + for _ in range(self.limit): + self._resource.put_nowait(self.new()) + + def close_resource(self, resource): + pass + + def prepare(self, p): + if callable(p): + p = p() + if p._channel is None: + conn = self._acquire_connection() + try: + p.revive(conn) + except BaseException: + conn.release() + raise + return p + + def release(self, resource): + if resource.__connection__: + resource.__connection__.release() + resource.channel = None + super(ProducerPool, self).release(resource) + + +class PoolGroup(EqualityDict): + + def __init__(self, limit=None): + self.limit = limit + + def create(self, resource, limit): + raise NotImplementedError('PoolGroups must define ``create``') + + def __missing__(self, resource): + limit = self.limit + if limit is use_global_limit: + limit = get_limit() + if not _used[0]: + _used[0] = True + k = self[resource] = self.create(resource, limit) + return k + + +def register_group(group): + _groups.append(group) + return group + + +class Connections(PoolGroup): + + def create(self, connection, limit): + return connection.Pool(limit=limit) +connections = register_group(Connections(limit=use_global_limit)) + + +class Producers(PoolGroup): + + def create(self, connection, limit): + return ProducerPool(connections[connection], limit=limit) +producers = register_group(Producers(limit=use_global_limit)) + + +def _all_pools(): + return chain(*[(values(g) if g else iter([])) for g in _groups]) + + +def get_limit(): + return _limit[0] + + +def set_limit(limit, force=False, reset_after=False): + limit = limit or 0 + glimit = _limit[0] or 0 + if limit < glimit: + if not disable_limit_protection and (_used[0] and not force): + raise RuntimeError("Can't lower limit after pool in use.") + reset_after = True + if limit != glimit: + _limit[0] = limit + for pool in _all_pools(): + pool.limit = limit + if reset_after: + reset() + return limit + + +def reset(*args, **kwargs): + for pool in _all_pools(): + try: + pool.force_close_all() + except Exception: + pass + for group in _groups: + group.clear() + _used[0] = False + +try: + from multiprocessing.util import register_after_fork + register_after_fork(connections, reset) +except ImportError: # pragma: no cover + pass diff --git a/thesisenv/lib/python3.6/site-packages/kombu/serialization.py b/thesisenv/lib/python3.6/site-packages/kombu/serialization.py new file mode 100644 index 0000000..77f0be1 --- /dev/null +++ 
b/thesisenv/lib/python3.6/site-packages/kombu/serialization.py @@ -0,0 +1,461 @@ +""" +kombu.serialization +=================== + +Serialization utilities. + +""" +from __future__ import absolute_import + +import codecs +import os +import sys + +import pickle as pypickle +try: + import cPickle as cpickle +except ImportError: # pragma: no cover + cpickle = None # noqa + +from collections import namedtuple +from contextlib import contextmanager + +from .exceptions import ( + ContentDisallowed, DecodeError, EncodeError, SerializerNotInstalled +) +from .five import BytesIO, reraise, text_t +from .utils import entrypoints +from .utils.encoding import str_to_bytes, bytes_t + +__all__ = ['pickle', 'loads', 'dumps', 'register', 'unregister'] +SKIP_DECODE = frozenset(['binary', 'ascii-8bit']) +TRUSTED_CONTENT = frozenset(['application/data', 'application/text']) + +if sys.platform.startswith('java'): # pragma: no cover + + def _decode(t, coding): + return codecs.getdecoder(coding)(t)[0] +else: + _decode = codecs.decode + +pickle = cpickle or pypickle +pickle_load = pickle.load + +#: Kombu requires Python 2.5 or later so we use protocol 2 by default. +#: There's a new protocol (3) but this is only supported by Python 3. +pickle_protocol = int(os.environ.get('PICKLE_PROTOCOL', 2)) + +codec = namedtuple('codec', ('content_type', 'content_encoding', 'encoder')) + + +@contextmanager +def _reraise_errors(wrapper, + include=(Exception, ), exclude=(SerializerNotInstalled, )): + try: + yield + except exclude: + raise + except include as exc: + reraise(wrapper, wrapper(exc), sys.exc_info()[2]) + + +def pickle_loads(s, load=pickle_load): + # used to support buffer objects + return load(BytesIO(s)) + + +def parenthesize_alias(first, second): + return '%s (%s)' % (first, second) if first else second + + +class SerializerRegistry(object): + """The registry keeps track of serialization methods.""" + + def __init__(self): + self._encoders = {} + self._decoders = {} + self._default_encode = None + self._default_content_type = None + self._default_content_encoding = None + self._disabled_content_types = set() + self.type_to_name = {} + self.name_to_type = {} + + def register(self, name, encoder, decoder, content_type, + content_encoding='utf-8'): + if encoder: + self._encoders[name] = codec( + content_type, content_encoding, encoder, + ) + if decoder: + self._decoders[content_type] = decoder + self.type_to_name[content_type] = name + self.name_to_type[name] = content_type + + def enable(self, name): + if '/' not in name: + name = self.name_to_type[name] + self._disabled_content_types.discard(name) + + def disable(self, name): + if '/' not in name: + name = self.name_to_type[name] + self._disabled_content_types.add(name) + + def unregister(self, name): + try: + content_type = self.name_to_type[name] + self._decoders.pop(content_type, None) + self._encoders.pop(name, None) + self.type_to_name.pop(content_type, None) + self.name_to_type.pop(name, None) + except KeyError: + raise SerializerNotInstalled( + 'No encoder/decoder installed for {0}'.format(name)) + + def _set_default_serializer(self, name): + """ + Set the default serialization method used by this library. + + :param name: The name of the registered serialization method. + For example, `json` (default), `pickle`, `yaml`, `msgpack`, + or any custom methods registered using :meth:`register`. + + :raises SerializerNotInstalled: If the serialization method + requested is not available. 
+ """ + try: + (self._default_content_type, self._default_content_encoding, + self._default_encode) = self._encoders[name] + except KeyError: + raise SerializerNotInstalled( + 'No encoder installed for {0}'.format(name)) + + def dumps(self, data, serializer=None): + if serializer == 'raw': + return raw_encode(data) + if serializer and not self._encoders.get(serializer): + raise SerializerNotInstalled( + 'No encoder installed for {0}'.format(serializer)) + + # If a raw string was sent, assume binary encoding + # (it's likely either ASCII or a raw binary file, and a character + # set of 'binary' will encompass both, even if not ideal. + if not serializer and isinstance(data, bytes_t): + # In Python 3+, this would be "bytes"; allow binary data to be + # sent as a message without getting encoder errors + return 'application/data', 'binary', data + + # For Unicode objects, force it into a string + if not serializer and isinstance(data, text_t): + with _reraise_errors(EncodeError, exclude=()): + payload = data.encode('utf-8') + return 'text/plain', 'utf-8', payload + + if serializer: + content_type, content_encoding, encoder = \ + self._encoders[serializer] + else: + encoder = self._default_encode + content_type = self._default_content_type + content_encoding = self._default_content_encoding + + with _reraise_errors(EncodeError): + payload = encoder(data) + return content_type, content_encoding, payload + encode = dumps # XXX compat + + def loads(self, data, content_type, content_encoding, + accept=None, force=False, _trusted_content=TRUSTED_CONTENT): + content_type = content_type or 'application/data' + if accept is not None: + if content_type not in _trusted_content \ + and content_type not in accept: + raise self._for_untrusted_content(content_type, 'untrusted') + else: + if content_type in self._disabled_content_types and not force: + raise self._for_untrusted_content(content_type, 'disabled') + content_encoding = (content_encoding or 'utf-8').lower() + + if data: + decode = self._decoders.get(content_type) + if decode: + with _reraise_errors(DecodeError): + return decode(data) + if content_encoding not in SKIP_DECODE and \ + not isinstance(data, text_t): + with _reraise_errors(DecodeError): + return _decode(data, content_encoding) + return data + decode = loads # XXX compat + + def _for_untrusted_content(self, ctype, why): + return ContentDisallowed( + 'Refusing to deserialize {0} content of type {1}'.format( + why, + parenthesize_alias(self.type_to_name.get(ctype, ctype), ctype), + ), + ) + + +#: Global registry of serializers/deserializers. +registry = SerializerRegistry() + + +""" +.. function:: dumps(data, serializer=default_serializer) + + Serialize a data structure into a string suitable for sending + as an AMQP message body. + + :param data: The message data to send. Can be a list, + dictionary or a string. + + :keyword serializer: An optional string representing + the serialization method you want the data marshalled + into. (For example, `json`, `raw`, or `pickle`). + + If :const:`None` (default), then json will be used, unless + `data` is a :class:`str` or :class:`unicode` object. In this + latter case, no serialization occurs as it would be + unnecessary. + + Note that if `serializer` is specified, then that + serialization method will be used even if a :class:`str` + or :class:`unicode` object is passed in. + + :returns: A three-item tuple containing the content type + (e.g., `application/json`), content encoding, (e.g., + `utf-8`) and a string containing the serialized + data. 
+ + :raises SerializerNotInstalled: If the serialization method + requested is not available. +""" +dumps = encode = registry.encode # XXX encode is a compat alias + +""" +.. function:: loads(data, content_type, content_encoding): + + Deserialize a data stream as serialized using `dumps` + based on `content_type`. + + :param data: The message data to deserialize. + + :param content_type: The content-type of the data. + (e.g., `application/json`). + + :param content_encoding: The content-encoding of the data. + (e.g., `utf-8`, `binary`, or `us-ascii`). + + :returns: The unserialized data. + +""" +loads = decode = registry.decode # XXX decode is a compat alias + + +""" +.. function:: register(name, encoder, decoder, content_type, + content_encoding='utf-8'): + Register a new encoder/decoder. + + :param name: A convenience name for the serialization method. + + :param encoder: A method that will be passed a python data structure + and should return a string representing the serialized data. + If :const:`None`, then only a decoder will be registered. Encoding + will not be possible. + + :param decoder: A method that will be passed a string representing + serialized data and should return a python data structure. + If :const:`None`, then only an encoder will be registered. + Decoding will not be possible. + + :param content_type: The mime-type describing the serialized + structure. + + :param content_encoding: The content encoding (character set) that + the `decoder` method will be returning. Will usually be + `utf-8`, `us-ascii`, or `binary`. + +""" +register = registry.register + + +""" +.. function:: unregister(name): + Unregister registered encoder/decoder. + + :param name: Registered serialization method name. + +""" +unregister = registry.unregister + + +def raw_encode(data): + """Special case serializer.""" + content_type = 'application/data' + payload = data + if isinstance(payload, text_t): + content_encoding = 'utf-8' + with _reraise_errors(EncodeError, exclude=()): + payload = payload.encode(content_encoding) + else: + content_encoding = 'binary' + return content_type, content_encoding, payload + + +def register_json(): + """Register a encoder/decoder for JSON serialization.""" + from anyjson import loads as json_loads, dumps as json_dumps + + def _loads(obj): + if isinstance(obj, bytes_t): + obj = obj.decode('utf-8') + return json_loads(obj) + + registry.register('json', json_dumps, _loads, + content_type='application/json', + content_encoding='utf-8') + + +def register_yaml(): + """Register a encoder/decoder for YAML serialization. + + It is slower than JSON, but allows for more data types + to be serialized. Useful if you need to send data such as dates""" + try: + import yaml + registry.register('yaml', yaml.safe_dump, yaml.safe_load, + content_type='application/x-yaml', + content_encoding='utf-8') + except ImportError: + + def not_available(*args, **kwargs): + """In case a client receives a yaml message, but yaml + isn't installed.""" + raise SerializerNotInstalled( + 'No decoder installed for YAML. 
Install the PyYAML library') + registry.register('yaml', None, not_available, 'application/x-yaml') + + +if sys.version_info[0] == 3: # pragma: no cover + + def unpickle(s): + return pickle_loads(str_to_bytes(s)) + +else: + unpickle = pickle_loads # noqa + + +def register_pickle(): + """The fastest serialization method, but restricts + you to python clients.""" + + def pickle_dumps(obj, dumper=pickle.dumps): + return dumper(obj, protocol=pickle_protocol) + + registry.register('pickle', pickle_dumps, unpickle, + content_type='application/x-python-serialize', + content_encoding='binary') + + +def register_msgpack(): + """See http://msgpack.sourceforge.net/""" + pack = unpack = None + try: + import msgpack + if msgpack.version >= (0, 4): + from msgpack import packb, unpackb + + def pack(s): + return packb(s, use_bin_type=True) + + def unpack(s): + return unpackb(s, encoding='utf-8') + else: + def version_mismatch(*args, **kwargs): + raise SerializerNotInstalled( + 'msgpack requires msgpack-python >= 0.4.0') + pack = unpack = version_mismatch + except (ImportError, ValueError): + def not_available(*args, **kwargs): + raise SerializerNotInstalled( + 'No decoder installed for msgpack. ' + 'Please install the msgpack-python library') + pack = unpack = not_available + registry.register( + 'msgpack', pack, unpack, + content_type='application/x-msgpack', + content_encoding='binary', + ) + +# Register the base serialization methods. +register_json() +register_pickle() +register_yaml() +register_msgpack() + +# Default serializer is 'json' +registry._set_default_serializer('json') + + +_setupfuns = { + 'json': register_json, + 'pickle': register_pickle, + 'yaml': register_yaml, + 'msgpack': register_msgpack, + 'application/json': register_json, + 'application/x-yaml': register_yaml, + 'application/x-python-serialize': register_pickle, + 'application/x-msgpack': register_msgpack, +} + + +def enable_insecure_serializers(choices=['pickle', 'yaml', 'msgpack']): + """Enable serializers that are considered to be unsafe. + + Will enable ``pickle``, ``yaml`` and ``msgpack`` by default, + but you can also specify a list of serializers (by name or content type) + to enable. + + """ + for choice in choices: + try: + registry.enable(choice) + except KeyError: + pass + + +def disable_insecure_serializers(allowed=['json']): + """Disable untrusted serializers. + + Will disable all serializers except ``json`` + or you can specify a list of deserializers to allow. + + .. note:: + + Producers will still be able to serialize data + in these formats, but consumers will not accept + incoming data using the untrusted content types. + + """ + for name in registry._decoders: + registry.disable(name) + if allowed is not None: + for name in allowed: + registry.enable(name) + + +# Insecure serializers are disabled by default since v3.0 +disable_insecure_serializers() + +# Load entrypoints from installed extensions +for ep, args in entrypoints('kombu.serializers'): # pragma: no cover + register(ep.name, *args) + + +def prepare_accept_content(l, name_to_type=registry.name_to_type): + if l is not None: + return set(n if '/' in n else name_to_type[n] for n in l) + return l diff --git a/thesisenv/lib/python3.6/site-packages/kombu/simple.py b/thesisenv/lib/python3.6/site-packages/kombu/simple.py new file mode 100644 index 0000000..a92d9f6 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/simple.py @@ -0,0 +1,137 @@ +""" +kombu.simple +============ + +Simple interface. 
+ +""" +from __future__ import absolute_import + +import socket + +from collections import deque + +from . import entity +from . import messaging +from .connection import maybe_channel +from .five import Empty, monotonic + +__all__ = ['SimpleQueue', 'SimpleBuffer'] + + +class SimpleBase(object): + Empty = Empty + _consuming = False + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + + def __init__(self, channel, producer, consumer, no_ack=False): + self.channel = maybe_channel(channel) + self.producer = producer + self.consumer = consumer + self.no_ack = no_ack + self.queue = self.consumer.queues[0] + self.buffer = deque() + self.consumer.register_callback(self._receive) + + def get(self, block=True, timeout=None): + if not block: + return self.get_nowait() + self._consume() + elapsed = 0.0 + remaining = timeout + while True: + time_start = monotonic() + if self.buffer: + return self.buffer.popleft() + try: + self.channel.connection.client.drain_events( + timeout=timeout and remaining) + except socket.timeout: + raise self.Empty() + elapsed += monotonic() - time_start + remaining = timeout and timeout - elapsed or None + + def get_nowait(self): + m = self.queue.get(no_ack=self.no_ack) + if not m: + raise self.Empty() + return m + + def put(self, message, serializer=None, headers=None, compression=None, + routing_key=None, **kwargs): + self.producer.publish(message, + serializer=serializer, + routing_key=routing_key, + headers=headers, + compression=compression, + **kwargs) + + def clear(self): + return self.consumer.purge() + + def qsize(self): + _, size, _ = self.queue.queue_declare(passive=True) + return size + + def close(self): + self.consumer.cancel() + + def _receive(self, message_data, message): + self.buffer.append(message) + + def _consume(self): + if not self._consuming: + self.consumer.consume(no_ack=self.no_ack) + self._consuming = True + + def __len__(self): + """`len(self) -> self.qsize()`""" + return self.qsize() + + def __bool__(self): + return True + __nonzero__ = __bool__ + + +class SimpleQueue(SimpleBase): + no_ack = False + queue_opts = {} + exchange_opts = {'type': 'direct'} + + def __init__(self, channel, name, no_ack=None, queue_opts=None, + exchange_opts=None, serializer=None, + compression=None, **kwargs): + queue = name + queue_opts = dict(self.queue_opts, **queue_opts or {}) + exchange_opts = dict(self.exchange_opts, **exchange_opts or {}) + if no_ack is None: + no_ack = self.no_ack + if not isinstance(queue, entity.Queue): + exchange = entity.Exchange(name, **exchange_opts) + queue = entity.Queue(name, exchange, name, **queue_opts) + routing_key = name + else: + name = queue.name + exchange = queue.exchange + routing_key = queue.routing_key + producer = messaging.Producer(channel, exchange, + serializer=serializer, + routing_key=routing_key, + compression=compression) + consumer = messaging.Consumer(channel, queue) + super(SimpleQueue, self).__init__(channel, producer, + consumer, no_ack, **kwargs) + + +class SimpleBuffer(SimpleQueue): + no_ack = True + queue_opts = dict(durable=False, + auto_delete=True) + exchange_opts = dict(durable=False, + delivery_mode='transient', + auto_delete=True) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/syn.py b/thesisenv/lib/python3.6/site-packages/kombu/syn.py new file mode 100644 index 0000000..01b4d47 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/syn.py @@ -0,0 +1,53 @@ +""" +kombu.syn +========= + +""" +from __future__ import absolute_import + +import sys + 
+__all__ = ['detect_environment'] + +_environment = None + + +def blocking(fun, *args, **kwargs): + return fun(*args, **kwargs) + + +def select_blocking_method(type): + pass + + +def _detect_environment(): + # ## -eventlet- + if 'eventlet' in sys.modules: + try: + from eventlet.patcher import is_monkey_patched as is_eventlet + import socket + + if is_eventlet(socket): + return 'eventlet' + except ImportError: + pass + + # ## -gevent- + if 'gevent' in sys.modules: + try: + from gevent import socket as _gsocket + import socket + + if socket.socket is _gsocket.socket: + return 'gevent' + except ImportError: + pass + + return 'default' + + +def detect_environment(): + global _environment + if _environment is None: + _environment = _detect_environment() + return _environment diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/__init__.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/__init__.py new file mode 100644 index 0000000..fb9f21a --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/__init__.py @@ -0,0 +1,91 @@ +from __future__ import absolute_import + +import anyjson +import atexit +import os +import sys + +from kombu.exceptions import VersionMismatch + +# avoid json implementation inconsistencies. +try: + import json # noqa + anyjson.force_implementation('json') +except ImportError: + anyjson.force_implementation('simplejson') + + +def teardown(): + # Workaround for multiprocessing bug where logging + # is attempted after global already collected at shutdown. + cancelled = set() + try: + import multiprocessing.util + cancelled.add(multiprocessing.util._exit_function) + except (AttributeError, ImportError): + pass + + try: + atexit._exithandlers[:] = [ + e for e in atexit._exithandlers if e[0] not in cancelled + ] + except AttributeError: # pragma: no cover + pass # Py3 missing _exithandlers + + +def find_distribution_modules(name=__name__, file=__file__): + current_dist_depth = len(name.split('.')) - 1 + current_dist = os.path.join(os.path.dirname(file), + *([os.pardir] * current_dist_depth)) + abs = os.path.abspath(current_dist) + dist_name = os.path.basename(abs) + + for dirpath, dirnames, filenames in os.walk(abs): + package = (dist_name + dirpath[len(abs):]).replace('/', '.') + if '__init__.py' in filenames: + yield package + for filename in filenames: + if filename.endswith('.py') and filename != '__init__.py': + yield '.'.join([package, filename])[:-3] + + +def import_all_modules(name=__name__, file=__file__, skip=[]): + for module in find_distribution_modules(name, file): + if module not in skip: + print('preimporting %r for coverage...' % (module, )) + try: + __import__(module) + except (ImportError, VersionMismatch, AttributeError): + pass + + +def is_in_coverage(): + return (os.environ.get('COVER_ALL_MODULES') or + '--with-coverage3' in sys.argv) + + +def setup_django_env(): + try: + from django.conf import settings + except ImportError: + return + + if not settings.configured: + settings.configure( + DATABASES={ + 'default': { + 'ENGINE': 'django.db.backends.sqlite3', + 'NAME': ':memory:', + }, + }, + DATABASE_ENGINE='sqlite3', + DATABASE_NAME=':memory:', + INSTALLED_APPS=('kombu.transport.django', ), + ) + + +def setup(): + # so coverage sees all our modules. 
+ setup_django_env() + if is_in_coverage(): + import_all_modules() diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/async/__init__.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/async/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/async/test_hub.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/async/test_hub.py new file mode 100644 index 0000000..7d5d81c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/async/test_hub.py @@ -0,0 +1,33 @@ +from __future__ import absolute_import + +from kombu.async import hub as _hub +from kombu.async.hub import Hub, get_event_loop, set_event_loop + +from kombu.tests.case import Case + + +class test_Utils(Case): + + def setUp(self): + self._prev_loop = get_event_loop() + + def tearDown(self): + set_event_loop(self._prev_loop) + + def test_get_set_event_loop(self): + set_event_loop(None) + self.assertIsNone(_hub._current_loop) + self.assertIsNone(get_event_loop()) + hub = Hub() + set_event_loop(hub) + self.assertIs(_hub._current_loop, hub) + self.assertIs(get_event_loop(), hub) + + +class test_Hub(Case): + + def setUp(self): + self.hub = Hub() + + def tearDown(self): + self.hub.close() diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/async/test_semaphore.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/async/test_semaphore.py new file mode 100644 index 0000000..5ca48de --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/async/test_semaphore.py @@ -0,0 +1,45 @@ +from __future__ import absolute_import + +from kombu.async.semaphore import LaxBoundedSemaphore + +from kombu.tests.case import Case + + +class test_LaxBoundedSemaphore(Case): + + def test_over_release(self): + x = LaxBoundedSemaphore(2) + calls = [] + for i in range(1, 21): + x.acquire(calls.append, i) + x.release() + x.acquire(calls.append, 'x') + x.release() + x.acquire(calls.append, 'y') + + self.assertEqual(calls, [1, 2, 3, 4]) + + for i in range(30): + x.release() + self.assertEqual(calls, list(range(1, 21)) + ['x', 'y']) + self.assertEqual(x.value, x.initial_value) + + calls[:] = [] + for i in range(1, 11): + x.acquire(calls.append, i) + for i in range(1, 11): + x.release() + self.assertEqual(calls, list(range(1, 11))) + + calls[:] = [] + self.assertEqual(x.value, x.initial_value) + x.acquire(calls.append, 'x') + self.assertEqual(x.value, 1) + x.acquire(calls.append, 'y') + self.assertEqual(x.value, 0) + x.release() + self.assertEqual(x.value, 1) + x.release() + self.assertEqual(x.value, 2) + x.release() + self.assertEqual(x.value, 2) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/case.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/case.py new file mode 100644 index 0000000..f2caebd --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/case.py @@ -0,0 +1,219 @@ +from __future__ import absolute_import + +import os +import sys +import types + +from functools import wraps + +try: + from unittest import mock +except ImportError: + import mock # noqa + +from nose import SkipTest + +from kombu.five import builtins, string_t, StringIO +from kombu.utils.encoding import ensure_bytes + +try: + import unittest + unittest.skip +except AttributeError: + import unittest2 as unittest # noqa + +PY3 = sys.version_info[0] == 3 + +MagicMock = mock.MagicMock +patch = mock.patch +call = mock.call + + +class Case(unittest.TestCase): + + def assertItemsEqual(self, a, b, *args, **kwargs): + return self.assertEqual(sorted(a), 
sorted(b), *args, **kwargs) + assertSameElements = assertItemsEqual + + +class Mock(mock.Mock): + + def __init__(self, *args, **kwargs): + attrs = kwargs.pop('attrs', None) or {} + super(Mock, self).__init__(*args, **kwargs) + for attr_name, attr_value in attrs.items(): + setattr(self, attr_name, attr_value) + + +class _ContextMock(Mock): + """Dummy class implementing __enter__ and __exit__ + as the with statement requires these to be implemented + in the class, not just the instance.""" + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + pass + + +def ContextMock(*args, **kwargs): + obj = _ContextMock(*args, **kwargs) + obj.attach_mock(Mock(), '__enter__') + obj.attach_mock(Mock(), '__exit__') + obj.__enter__.return_value = obj + # if __exit__ return a value the exception is ignored, + # so it must return None here. + obj.__exit__.return_value = None + return obj + + +class MockPool(object): + + def __init__(self, value=None): + self.value = value or ContextMock() + + def acquire(self, **kwargs): + return self.value + + +def redirect_stdouts(fun): + + @wraps(fun) + def _inner(*args, **kwargs): + sys.stdout = StringIO() + sys.stderr = StringIO() + try: + return fun(*args, **dict(kwargs, + stdout=sys.stdout, stderr=sys.stderr)) + finally: + sys.stdout = sys.__stdout__ + sys.stderr = sys.__stderr__ + + return _inner + + +def module_exists(*modules): + + def _inner(fun): + + @wraps(fun) + def __inner(*args, **kwargs): + gen = [] + for module in modules: + if isinstance(module, string_t): + if not PY3: + module = ensure_bytes(module) + module = types.ModuleType(module) + gen.append(module) + sys.modules[module.__name__] = module + name = module.__name__ + if '.' in name: + parent, _, attr = name.rpartition('.') + setattr(sys.modules[parent], attr, module) + try: + return fun(*args, **kwargs) + finally: + for module in gen: + sys.modules.pop(module.__name__, None) + + return __inner + return _inner + + +# Taken from +# http://bitbucket.org/runeh/snippets/src/tip/missing_modules.py +def mask_modules(*modnames): + def _inner(fun): + + @wraps(fun) + def __inner(*args, **kwargs): + realimport = builtins.__import__ + + def myimp(name, *args, **kwargs): + if name in modnames: + raise ImportError('No module named %s' % name) + else: + return realimport(name, *args, **kwargs) + + builtins.__import__ = myimp + try: + return fun(*args, **kwargs) + finally: + builtins.__import__ = realimport + + return __inner + return _inner + + +def skip_if_environ(env_var_name): + + def _wrap_test(fun): + + @wraps(fun) + def _skips_if_environ(*args, **kwargs): + if os.environ.get(env_var_name): + raise SkipTest('SKIP %s: %s set\n' % ( + fun.__name__, env_var_name)) + return fun(*args, **kwargs) + + return _skips_if_environ + + return _wrap_test + + +def skip_if_module(module): + def _wrap_test(fun): + @wraps(fun) + def _skip_if_module(*args, **kwargs): + try: + __import__(module) + raise SkipTest('SKIP %s: %s available\n' % ( + fun.__name__, module)) + except ImportError: + pass + return fun(*args, **kwargs) + return _skip_if_module + return _wrap_test + + +def skip_if_not_module(module, import_errors=(ImportError, )): + def _wrap_test(fun): + @wraps(fun) + def _skip_if_not_module(*args, **kwargs): + try: + __import__(module) + except import_errors: + raise SkipTest('SKIP %s: %s available\n' % ( + fun.__name__, module)) + return fun(*args, **kwargs) + return _skip_if_not_module + return _wrap_test + + +def skip_if_quick(fun): + return skip_if_environ('QUICKTEST')(fun) + + +def case_no_pypy(cls): 
+ setup = cls.setUp + + @wraps(setup) + def around_setup(self): + if getattr(sys, 'pypy_version_info', None): + raise SkipTest('pypy incompatible') + setup(self) + cls.setUp = around_setup + return cls + + +def case_no_python3(cls): + setup = cls.setUp + + @wraps(setup) + def around_setup(self): + if PY3: + raise SkipTest('Python3 incompatible') + setup(self) + cls.setUp = around_setup + return cls diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/mocks.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/mocks.py new file mode 100644 index 0000000..836457e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/mocks.py @@ -0,0 +1,148 @@ +from __future__ import absolute_import + +from itertools import count + +import anyjson + +from kombu.transport import base + + +class Message(base.Message): + + def __init__(self, *args, **kwargs): + self.throw_decode_error = kwargs.get('throw_decode_error', False) + super(Message, self).__init__(*args, **kwargs) + + def decode(self): + if self.throw_decode_error: + raise ValueError("can't decode message") + return super(Message, self).decode() + + +class Channel(base.StdChannel): + open = True + throw_decode_error = False + _ids = count(1) + + def __init__(self, connection): + self.connection = connection + self.called = [] + self.deliveries = count(1) + self.to_deliver = [] + self.events = {'basic_return': set()} + self.channel_id = next(self._ids) + + def _called(self, name): + self.called.append(name) + + def __contains__(self, key): + return key in self.called + + def exchange_declare(self, *args, **kwargs): + self._called('exchange_declare') + + def prepare_message(self, body, priority=0, content_type=None, + content_encoding=None, headers=None, properties={}): + self._called('prepare_message') + return dict(body=body, + headers=headers, + properties=properties, + priority=priority, + content_type=content_type, + content_encoding=content_encoding) + + def basic_publish(self, message, exchange='', routing_key='', + mandatory=False, immediate=False, **kwargs): + self._called('basic_publish') + return message, exchange, routing_key + + def exchange_delete(self, *args, **kwargs): + self._called('exchange_delete') + + def queue_declare(self, *args, **kwargs): + self._called('queue_declare') + + def queue_bind(self, *args, **kwargs): + self._called('queue_bind') + + def queue_unbind(self, *args, **kwargs): + self._called('queue_unbind') + + def queue_delete(self, queue, if_unused=False, if_empty=False, **kwargs): + self._called('queue_delete') + + def basic_get(self, *args, **kwargs): + self._called('basic_get') + try: + return self.to_deliver.pop() + except IndexError: + pass + + def queue_purge(self, *args, **kwargs): + self._called('queue_purge') + + def basic_consume(self, *args, **kwargs): + self._called('basic_consume') + + def basic_cancel(self, *args, **kwargs): + self._called('basic_cancel') + + def basic_ack(self, *args, **kwargs): + self._called('basic_ack') + + def basic_recover(self, requeue=False): + self._called('basic_recover') + + def exchange_bind(self, *args, **kwargs): + self._called('exchange_bind') + + def exchange_unbind(self, *args, **kwargs): + self._called('exchange_unbind') + + def close(self): + self._called('close') + + def message_to_python(self, message, *args, **kwargs): + self._called('message_to_python') + return Message(self, body=anyjson.dumps(message), + delivery_tag=next(self.deliveries), + throw_decode_error=self.throw_decode_error, + content_type='application/json', + 
content_encoding='utf-8') + + def flow(self, active): + self._called('flow') + + def basic_reject(self, delivery_tag, requeue=False): + if requeue: + return self._called('basic_reject:requeue') + return self._called('basic_reject') + + def basic_qos(self, prefetch_size=0, prefetch_count=0, + apply_global=False): + self._called('basic_qos') + + +class Connection(object): + connected = True + + def __init__(self, client): + self.client = client + + def channel(self): + return Channel(self) + + +class Transport(base.Transport): + + def establish_connection(self): + return Connection(self.client) + + def create_channel(self, connection): + return connection.channel() + + def drain_events(self, connection, **kwargs): + return 'event' + + def close_connection(self, connection): + connection.connected = False diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/test_clocks.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_clocks.py new file mode 100644 index 0000000..fa39b6d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_clocks.py @@ -0,0 +1,104 @@ +from __future__ import absolute_import + +import pickle + +from heapq import heappush +from time import time + +from kombu.clocks import LamportClock, timetuple + +from .case import Mock, Case + + +class test_LamportClock(Case): + + def test_clocks(self): + c1 = LamportClock() + c2 = LamportClock() + + c1.forward() + c2.forward() + c1.forward() + c1.forward() + c2.adjust(c1.value) + self.assertEqual(c2.value, c1.value + 1) + self.assertTrue(repr(c1)) + + c2_val = c2.value + c2.forward() + c2.forward() + c2.adjust(c1.value) + self.assertEqual(c2.value, c2_val + 2 + 1) + + c1.adjust(c2.value) + self.assertEqual(c1.value, c2.value + 1) + + def test_sort(self): + c = LamportClock() + pid1 = 'a.example.com:312' + pid2 = 'b.example.com:311' + + events = [] + + m1 = (c.forward(), pid1) + heappush(events, m1) + m2 = (c.forward(), pid2) + heappush(events, m2) + m3 = (c.forward(), pid1) + heappush(events, m3) + m4 = (30, pid1) + heappush(events, m4) + m5 = (30, pid2) + heappush(events, m5) + + self.assertEqual(str(c), str(c.value)) + + self.assertEqual(c.sort_heap(events), m1) + self.assertEqual(c.sort_heap([m4, m5]), m4) + self.assertEqual(c.sort_heap([m4, m5, m1]), m4) + + +class test_timetuple(Case): + + def test_repr(self): + x = timetuple(133, time(), 'id', Mock()) + self.assertTrue(repr(x)) + + def test_pickleable(self): + x = timetuple(133, time(), 'id', 'obj') + self.assertEqual(pickle.loads(pickle.dumps(x)), tuple(x)) + + def test_order(self): + t1 = time() + t2 = time() + 300 # windows clock not reliable + a = timetuple(133, t1, 'A', 'obj') + b = timetuple(140, t1, 'A', 'obj') + self.assertTrue(a.__getnewargs__()) + self.assertEqual(a.clock, 133) + self.assertEqual(a.timestamp, t1) + self.assertEqual(a.id, 'A') + self.assertEqual(a.obj, 'obj') + self.assertTrue( + a <= b, + ) + self.assertTrue( + b >= a, + ) + + self.assertEqual( + timetuple(134, time(), 'A', 'obj').__lt__(tuple()), + NotImplemented, + ) + self.assertGreater( + timetuple(134, t2, 'A', 'obj'), + timetuple(133, t1, 'A', 'obj'), + ) + self.assertGreater( + timetuple(134, t1, 'B', 'obj'), + timetuple(134, t1, 'A', 'obj'), + ) + + self.assertGreater( + timetuple(None, t2, 'B', 'obj'), + timetuple(None, t1, 'A', 'obj'), + ) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/test_common.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_common.py new file mode 100644 index 0000000..9f7df92 --- /dev/null +++ 
b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_common.py @@ -0,0 +1,419 @@ +from __future__ import absolute_import + +import socket + +from amqp import RecoverableConnectionError + +from kombu import common +from kombu.common import ( + Broadcast, maybe_declare, + send_reply, collect_replies, + declaration_cached, ignore_errors, + QoS, PREFETCH_COUNT_MAX, +) + +from .case import Case, ContextMock, Mock, MockPool, patch + + +class test_ignore_errors(Case): + + def test_ignored(self): + connection = Mock() + connection.channel_errors = (KeyError, ) + connection.connection_errors = (KeyError, ) + + with ignore_errors(connection): + raise KeyError() + + def raising(): + raise KeyError() + + ignore_errors(connection, raising) + + connection.channel_errors = connection.connection_errors = \ + () + + with self.assertRaises(KeyError): + with ignore_errors(connection): + raise KeyError() + + +class test_declaration_cached(Case): + + def test_when_cached(self): + chan = Mock() + chan.connection.client.declared_entities = ['foo'] + self.assertTrue(declaration_cached('foo', chan)) + + def test_when_not_cached(self): + chan = Mock() + chan.connection.client.declared_entities = ['bar'] + self.assertFalse(declaration_cached('foo', chan)) + + +class test_Broadcast(Case): + + def test_arguments(self): + q = Broadcast(name='test_Broadcast') + self.assertTrue(q.name.startswith('bcast.')) + self.assertEqual(q.alias, 'test_Broadcast') + self.assertTrue(q.auto_delete) + self.assertEqual(q.exchange.name, 'test_Broadcast') + self.assertEqual(q.exchange.type, 'fanout') + + q = Broadcast('test_Broadcast', 'explicit_queue_name') + self.assertEqual(q.name, 'explicit_queue_name') + self.assertEqual(q.exchange.name, 'test_Broadcast') + + q2 = q(Mock()) + self.assertEqual(q2.name, q.name) + + +class test_maybe_declare(Case): + + def test_cacheable(self): + channel = Mock() + client = channel.connection.client = Mock() + client.declared_entities = set() + entity = Mock() + entity.can_cache_declaration = True + entity.auto_delete = False + entity.is_bound = True + entity.channel = channel + + maybe_declare(entity, channel) + self.assertEqual(entity.declare.call_count, 1) + self.assertIn( + hash(entity), channel.connection.client.declared_entities, + ) + + maybe_declare(entity, channel) + self.assertEqual(entity.declare.call_count, 1) + + entity.channel.connection = None + with self.assertRaises(RecoverableConnectionError): + maybe_declare(entity) + + def test_binds_entities(self): + channel = Mock() + channel.connection.client.declared_entities = set() + entity = Mock() + entity.can_cache_declaration = True + entity.is_bound = False + entity.bind.return_value = entity + entity.bind.return_value.channel = channel + + maybe_declare(entity, channel) + entity.bind.assert_called_with(channel) + + def test_with_retry(self): + channel = Mock() + client = channel.connection.client = Mock() + client.declared_entities = set() + entity = Mock() + entity.can_cache_declaration = True + entity.is_bound = True + entity.channel = channel + + maybe_declare(entity, channel, retry=True) + self.assertTrue(channel.connection.client.ensure.call_count) + + +class test_replies(Case): + + def test_send_reply(self): + req = Mock() + req.content_type = 'application/json' + req.content_encoding = 'binary' + req.properties = {'reply_to': 'hello', + 'correlation_id': 'world'} + channel = Mock() + exchange = Mock() + exchange.is_bound = True + exchange.channel = channel + producer = Mock() + producer.channel = channel + 
producer.channel.connection.client.declared_entities = set() + send_reply(exchange, req, {'hello': 'world'}, producer) + + self.assertTrue(producer.publish.call_count) + args = producer.publish.call_args + self.assertDictEqual(args[0][0], {'hello': 'world'}) + self.assertDictEqual(args[1], {'exchange': exchange, + 'routing_key': 'hello', + 'correlation_id': 'world', + 'serializer': 'json', + 'retry': False, + 'retry_policy': None, + 'content_encoding': 'binary'}) + + @patch('kombu.common.itermessages') + def test_collect_replies_with_ack(self, itermessages): + conn, channel, queue = Mock(), Mock(), Mock() + body, message = Mock(), Mock() + itermessages.return_value = [(body, message)] + it = collect_replies(conn, channel, queue, no_ack=False) + m = next(it) + self.assertIs(m, body) + itermessages.assert_called_with(conn, channel, queue, no_ack=False) + message.ack.assert_called_with() + + with self.assertRaises(StopIteration): + next(it) + + channel.after_reply_message_received.assert_called_with(queue.name) + + @patch('kombu.common.itermessages') + def test_collect_replies_no_ack(self, itermessages): + conn, channel, queue = Mock(), Mock(), Mock() + body, message = Mock(), Mock() + itermessages.return_value = [(body, message)] + it = collect_replies(conn, channel, queue) + m = next(it) + self.assertIs(m, body) + itermessages.assert_called_with(conn, channel, queue, no_ack=True) + self.assertFalse(message.ack.called) + + @patch('kombu.common.itermessages') + def test_collect_replies_no_replies(self, itermessages): + conn, channel, queue = Mock(), Mock(), Mock() + itermessages.return_value = [] + it = collect_replies(conn, channel, queue) + with self.assertRaises(StopIteration): + next(it) + + self.assertFalse(channel.after_reply_message_received.called) + + +class test_insured(Case): + + @patch('kombu.common.logger') + def test_ensure_errback(self, logger): + common._ensure_errback('foo', 30) + self.assertTrue(logger.error.called) + + def test_revive_connection(self): + on_revive = Mock() + channel = Mock() + common.revive_connection(Mock(), channel, on_revive) + on_revive.assert_called_with(channel) + + common.revive_connection(Mock(), channel, None) + + def get_insured_mocks(self, insured_returns=('works', 'ignored')): + conn = ContextMock() + pool = MockPool(conn) + fun = Mock() + insured = conn.autoretry.return_value = Mock() + insured.return_value = insured_returns + return conn, pool, fun, insured + + def test_insured(self): + conn, pool, fun, insured = self.get_insured_mocks() + + ret = common.insured(pool, fun, (2, 2), {'foo': 'bar'}) + self.assertEqual(ret, 'works') + conn.ensure_connection.assert_called_with( + errback=common._ensure_errback, + ) + + self.assertTrue(insured.called) + i_args, i_kwargs = insured.call_args + self.assertTupleEqual(i_args, (2, 2)) + self.assertDictEqual(i_kwargs, {'foo': 'bar', + 'connection': conn}) + + self.assertTrue(conn.autoretry.called) + ar_args, ar_kwargs = conn.autoretry.call_args + self.assertTupleEqual(ar_args, (fun, conn.default_channel)) + self.assertTrue(ar_kwargs.get('on_revive')) + self.assertTrue(ar_kwargs.get('errback')) + + def test_insured_custom_errback(self): + conn, pool, fun, insured = self.get_insured_mocks() + + custom_errback = Mock() + common.insured(pool, fun, (2, 2), {'foo': 'bar'}, + errback=custom_errback) + conn.ensure_connection.assert_called_with(errback=custom_errback) + + +class MockConsumer(object): + consumers = set() + + def __init__(self, channel, queues=None, callbacks=None, **kwargs): + self.channel = channel 
+ self.queues = queues + self.callbacks = callbacks + + def __enter__(self): + self.consumers.add(self) + return self + + def __exit__(self, *exc_info): + self.consumers.discard(self) + + +class test_itermessages(Case): + + class MockConnection(object): + should_raise_timeout = False + + def drain_events(self, **kwargs): + if self.should_raise_timeout: + raise socket.timeout() + for consumer in MockConsumer.consumers: + for callback in consumer.callbacks: + callback('body', 'message') + + def test_default(self): + conn = self.MockConnection() + channel = Mock() + channel.connection.client = conn + conn.Consumer = MockConsumer + it = common.itermessages(conn, channel, 'q', limit=1) + + ret = next(it) + self.assertTupleEqual(ret, ('body', 'message')) + + with self.assertRaises(StopIteration): + next(it) + + def test_when_raises_socket_timeout(self): + conn = self.MockConnection() + conn.should_raise_timeout = True + channel = Mock() + channel.connection.client = conn + conn.Consumer = MockConsumer + it = common.itermessages(conn, channel, 'q', limit=1) + + with self.assertRaises(StopIteration): + next(it) + + @patch('kombu.common.deque') + def test_when_raises_IndexError(self, deque): + deque_instance = deque.return_value = Mock() + deque_instance.popleft.side_effect = IndexError() + conn = self.MockConnection() + channel = Mock() + conn.Consumer = MockConsumer + it = common.itermessages(conn, channel, 'q', limit=1) + + with self.assertRaises(StopIteration): + next(it) + + +class test_QoS(Case): + + class _QoS(QoS): + def __init__(self, value): + self.value = value + QoS.__init__(self, None, value) + + def set(self, value): + return value + + def test_qos_exceeds_16bit(self): + with patch('kombu.common.logger') as logger: + callback = Mock() + qos = QoS(callback, 10) + qos.prev = 100 + # cannot use 2 ** 32 because of a bug on OSX Py2.5: + # https://jira.mongodb.org/browse/PYTHON-389 + qos.set(4294967296) + self.assertTrue(logger.warn.called) + callback.assert_called_with(prefetch_count=0) + + def test_qos_increment_decrement(self): + qos = self._QoS(10) + self.assertEqual(qos.increment_eventually(), 11) + self.assertEqual(qos.increment_eventually(3), 14) + self.assertEqual(qos.increment_eventually(-30), 14) + self.assertEqual(qos.decrement_eventually(7), 7) + self.assertEqual(qos.decrement_eventually(), 6) + + def test_qos_disabled_increment_decrement(self): + qos = self._QoS(0) + self.assertEqual(qos.increment_eventually(), 0) + self.assertEqual(qos.increment_eventually(3), 0) + self.assertEqual(qos.increment_eventually(-30), 0) + self.assertEqual(qos.decrement_eventually(7), 0) + self.assertEqual(qos.decrement_eventually(), 0) + self.assertEqual(qos.decrement_eventually(10), 0) + + def test_qos_thread_safe(self): + qos = self._QoS(10) + + def add(): + for i in range(1000): + qos.increment_eventually() + + def sub(): + for i in range(1000): + qos.decrement_eventually() + + def threaded(funs): + from threading import Thread + threads = [Thread(target=fun) for fun in funs] + for thread in threads: + thread.start() + for thread in threads: + thread.join() + + threaded([add, add]) + self.assertEqual(qos.value, 2010) + + qos.value = 1000 + threaded([add, sub]) # n = 2 + self.assertEqual(qos.value, 1000) + + def test_exceeds_short(self): + qos = QoS(Mock(), PREFETCH_COUNT_MAX - 1) + qos.update() + self.assertEqual(qos.value, PREFETCH_COUNT_MAX - 1) + qos.increment_eventually() + self.assertEqual(qos.value, PREFETCH_COUNT_MAX) + qos.increment_eventually() + self.assertEqual(qos.value, 
PREFETCH_COUNT_MAX + 1) + qos.decrement_eventually() + self.assertEqual(qos.value, PREFETCH_COUNT_MAX) + qos.decrement_eventually() + self.assertEqual(qos.value, PREFETCH_COUNT_MAX - 1) + + def test_consumer_increment_decrement(self): + mconsumer = Mock() + qos = QoS(mconsumer.qos, 10) + qos.update() + self.assertEqual(qos.value, 10) + mconsumer.qos.assert_called_with(prefetch_count=10) + qos.decrement_eventually() + qos.update() + self.assertEqual(qos.value, 9) + mconsumer.qos.assert_called_with(prefetch_count=9) + qos.decrement_eventually() + self.assertEqual(qos.value, 8) + mconsumer.qos.assert_called_with(prefetch_count=9) + self.assertIn({'prefetch_count': 9}, mconsumer.qos.call_args) + + # Does not decrement 0 value + qos.value = 0 + qos.decrement_eventually() + self.assertEqual(qos.value, 0) + qos.increment_eventually() + self.assertEqual(qos.value, 0) + + def test_consumer_decrement_eventually(self): + mconsumer = Mock() + qos = QoS(mconsumer.qos, 10) + qos.decrement_eventually() + self.assertEqual(qos.value, 9) + qos.value = 0 + qos.decrement_eventually() + self.assertEqual(qos.value, 0) + + def test_set(self): + mconsumer = Mock() + qos = QoS(mconsumer.qos, 10) + qos.set(12) + self.assertEqual(qos.prev, 12) + qos.set(qos.prev) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/test_compat.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_compat.py new file mode 100644 index 0000000..b081cf0 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_compat.py @@ -0,0 +1,331 @@ +from __future__ import absolute_import + +from kombu import Connection, Exchange, Queue +from kombu import compat + +from .case import Case, Mock, patch +from .mocks import Transport, Channel + + +class test_misc(Case): + + def test_iterconsume(self): + + class MyConnection(object): + drained = 0 + + def drain_events(self, *args, **kwargs): + self.drained += 1 + return self.drained + + class Consumer(object): + active = False + + def consume(self, *args, **kwargs): + self.active = True + + conn = MyConnection() + consumer = Consumer() + it = compat._iterconsume(conn, consumer) + self.assertEqual(next(it), 1) + self.assertTrue(consumer.active) + + it2 = compat._iterconsume(conn, consumer, limit=10) + self.assertEqual(list(it2), [2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + + def test_Queue_from_dict(self): + defs = {'binding_key': 'foo.#', + 'exchange': 'fooex', + 'exchange_type': 'topic', + 'durable': True, + 'auto_delete': False} + + q1 = Queue.from_dict('foo', **dict(defs)) + self.assertEqual(q1.name, 'foo') + self.assertEqual(q1.routing_key, 'foo.#') + self.assertEqual(q1.exchange.name, 'fooex') + self.assertEqual(q1.exchange.type, 'topic') + self.assertTrue(q1.durable) + self.assertTrue(q1.exchange.durable) + self.assertFalse(q1.auto_delete) + self.assertFalse(q1.exchange.auto_delete) + + q2 = Queue.from_dict('foo', **dict(defs, + exchange_durable=False)) + self.assertTrue(q2.durable) + self.assertFalse(q2.exchange.durable) + + q3 = Queue.from_dict('foo', **dict(defs, + exchange_auto_delete=True)) + self.assertFalse(q3.auto_delete) + self.assertTrue(q3.exchange.auto_delete) + + q4 = Queue.from_dict('foo', **dict(defs, + queue_durable=False)) + self.assertFalse(q4.durable) + self.assertTrue(q4.exchange.durable) + + q5 = Queue.from_dict('foo', **dict(defs, + queue_auto_delete=True)) + self.assertTrue(q5.auto_delete) + self.assertFalse(q5.exchange.auto_delete) + + self.assertEqual(Queue.from_dict('foo', **dict(defs)), + Queue.from_dict('foo', **dict(defs))) + + +class 
test_Publisher(Case): + + def setUp(self): + self.connection = Connection(transport=Transport) + + def test_constructor(self): + pub = compat.Publisher(self.connection, + exchange='test_Publisher_constructor', + routing_key='rkey') + self.assertIsInstance(pub.backend, Channel) + self.assertEqual(pub.exchange.name, 'test_Publisher_constructor') + self.assertTrue(pub.exchange.durable) + self.assertFalse(pub.exchange.auto_delete) + self.assertEqual(pub.exchange.type, 'direct') + + pub2 = compat.Publisher(self.connection, + exchange='test_Publisher_constructor2', + routing_key='rkey', + auto_delete=True, + durable=False) + self.assertTrue(pub2.exchange.auto_delete) + self.assertFalse(pub2.exchange.durable) + + explicit = Exchange('test_Publisher_constructor_explicit', + type='topic') + pub3 = compat.Publisher(self.connection, + exchange=explicit) + self.assertEqual(pub3.exchange, explicit) + + compat.Publisher(self.connection, + exchange='test_Publisher_constructor3', + channel=self.connection.default_channel) + + def test_send(self): + pub = compat.Publisher(self.connection, + exchange='test_Publisher_send', + routing_key='rkey') + pub.send({'foo': 'bar'}) + self.assertIn('basic_publish', pub.backend) + pub.close() + + def test__enter__exit__(self): + pub = compat.Publisher(self.connection, + exchange='test_Publisher_send', + routing_key='rkey') + x = pub.__enter__() + self.assertIs(x, pub) + x.__exit__() + self.assertTrue(pub._closed) + + +class test_Consumer(Case): + + def setUp(self): + self.connection = Connection(transport=Transport) + + @patch('kombu.compat._iterconsume') + def test_iterconsume_calls__iterconsume(self, it, n='test_iterconsume'): + c = compat.Consumer(self.connection, queue=n, exchange=n) + c.iterconsume(limit=10, no_ack=True) + it.assert_called_with(c.connection, c, True, 10) + + def test_constructor(self, n='test_Consumer_constructor'): + c = compat.Consumer(self.connection, queue=n, exchange=n, + routing_key='rkey') + self.assertIsInstance(c.backend, Channel) + q = c.queues[0] + self.assertTrue(q.durable) + self.assertTrue(q.exchange.durable) + self.assertFalse(q.auto_delete) + self.assertFalse(q.exchange.auto_delete) + self.assertEqual(q.name, n) + self.assertEqual(q.exchange.name, n) + + c2 = compat.Consumer(self.connection, queue=n + '2', + exchange=n + '2', + routing_key='rkey', durable=False, + auto_delete=True, exclusive=True) + q2 = c2.queues[0] + self.assertFalse(q2.durable) + self.assertFalse(q2.exchange.durable) + self.assertTrue(q2.auto_delete) + self.assertTrue(q2.exchange.auto_delete) + + def test__enter__exit__(self, n='test__enter__exit__'): + c = compat.Consumer(self.connection, queue=n, exchange=n, + routing_key='rkey') + x = c.__enter__() + self.assertIs(x, c) + x.__exit__() + self.assertTrue(c._closed) + + def test_revive(self, n='test_revive'): + c = compat.Consumer(self.connection, queue=n, exchange=n) + + with self.connection.channel() as c2: + c.revive(c2) + self.assertIs(c.backend, c2) + + def test__iter__(self, n='test__iter__'): + c = compat.Consumer(self.connection, queue=n, exchange=n) + c.iterqueue = Mock() + + c.__iter__() + c.iterqueue.assert_called_with(infinite=True) + + def test_iter(self, n='test_iterqueue'): + c = compat.Consumer(self.connection, queue=n, exchange=n, + routing_key='rkey') + c.close() + + def test_process_next(self, n='test_process_next'): + c = compat.Consumer(self.connection, queue=n, exchange=n, + routing_key='rkey') + with self.assertRaises(NotImplementedError): + c.process_next() + c.close() + + def 
test_iterconsume(self, n='test_iterconsume'): + c = compat.Consumer(self.connection, queue=n, exchange=n, + routing_key='rkey') + c.close() + + def test_discard_all(self, n='test_discard_all'): + c = compat.Consumer(self.connection, queue=n, exchange=n, + routing_key='rkey') + c.discard_all() + self.assertIn('queue_purge', c.backend) + + def test_fetch(self, n='test_fetch'): + c = compat.Consumer(self.connection, queue=n, exchange=n, + routing_key='rkey') + self.assertIsNone(c.fetch()) + self.assertIsNone(c.fetch(no_ack=True)) + self.assertIn('basic_get', c.backend) + + callback_called = [False] + + def receive(payload, message): + callback_called[0] = True + + c.backend.to_deliver.append('42') + payload = c.fetch().payload + self.assertEqual(payload, '42') + c.backend.to_deliver.append('46') + c.register_callback(receive) + self.assertEqual(c.fetch(enable_callbacks=True).payload, '46') + self.assertTrue(callback_called[0]) + + def test_discard_all_filterfunc_not_supported(self, n='xjf21j21'): + c = compat.Consumer(self.connection, queue=n, exchange=n, + routing_key='rkey') + with self.assertRaises(NotImplementedError): + c.discard_all(filterfunc=lambda x: x) + c.close() + + def test_wait(self, n='test_wait'): + + class C(compat.Consumer): + + def iterconsume(self, limit=None): + for i in range(limit): + yield i + + c = C(self.connection, + queue=n, exchange=n, routing_key='rkey') + self.assertEqual(c.wait(10), list(range(10))) + c.close() + + def test_iterqueue(self, n='test_iterqueue'): + i = [0] + + class C(compat.Consumer): + + def fetch(self, limit=None): + z = i[0] + i[0] += 1 + return z + + c = C(self.connection, + queue=n, exchange=n, routing_key='rkey') + self.assertEqual(list(c.iterqueue(limit=10)), list(range(10))) + c.close() + + +class test_ConsumerSet(Case): + + def setUp(self): + self.connection = Connection(transport=Transport) + + def test_providing_channel(self): + chan = Mock(name='channel') + cs = compat.ConsumerSet(self.connection, channel=chan) + self.assertTrue(cs._provided_channel) + self.assertIs(cs.backend, chan) + + cs.cancel = Mock(name='cancel') + cs.close() + self.assertFalse(chan.close.called) + + @patch('kombu.compat._iterconsume') + def test_iterconsume(self, _iterconsume, n='test_iterconsume'): + c = compat.Consumer(self.connection, queue=n, exchange=n) + cs = compat.ConsumerSet(self.connection, consumers=[c]) + cs.iterconsume(limit=10, no_ack=True) + _iterconsume.assert_called_with(c.connection, cs, True, 10) + + def test_revive(self, n='test_revive'): + c = compat.Consumer(self.connection, queue=n, exchange=n) + cs = compat.ConsumerSet(self.connection, consumers=[c]) + + with self.connection.channel() as c2: + cs.revive(c2) + self.assertIs(cs.backend, c2) + + def test_constructor(self, prefix='0daf8h21'): + dcon = {'%s.xyx' % prefix: {'exchange': '%s.xyx' % prefix, + 'routing_key': 'xyx'}, + '%s.xyz' % prefix: {'exchange': '%s.xyz' % prefix, + 'routing_key': 'xyz'}} + consumers = [compat.Consumer(self.connection, queue=prefix + str(i), + exchange=prefix + str(i)) + for i in range(3)] + c = compat.ConsumerSet(self.connection, consumers=consumers) + c2 = compat.ConsumerSet(self.connection, from_dict=dcon) + + self.assertEqual(len(c.queues), 3) + self.assertEqual(len(c2.queues), 2) + + c.add_consumer(compat.Consumer(self.connection, + queue=prefix + 'xaxxxa', + exchange=prefix + 'xaxxxa')) + self.assertEqual(len(c.queues), 4) + for cq in c.queues: + self.assertIs(cq.channel, c.channel) + + c2.add_consumer_from_dict({ + '%s.xxx' % prefix: { + 'exchange': 
'%s.xxx' % prefix, + 'routing_key': 'xxx', + }, + }) + self.assertEqual(len(c2.queues), 3) + for c2q in c2.queues: + self.assertIs(c2q.channel, c2.channel) + + c.discard_all() + self.assertEqual(c.channel.called.count('queue_purge'), 4) + c.consume() + + c.close() + c2.close() + self.assertIn('basic_cancel', c.channel) + self.assertIn('close', c.channel) + self.assertIn('close', c2.channel) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/test_compression.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_compression.py new file mode 100644 index 0000000..7d651ee --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_compression.py @@ -0,0 +1,50 @@ +from __future__ import absolute_import + +import sys + +from kombu import compression + +from .case import Case, SkipTest, mask_modules + + +class test_compression(Case): + + def setUp(self): + try: + import bz2 # noqa + except ImportError: + self.has_bzip2 = False + else: + self.has_bzip2 = True + + @mask_modules('bz2') + def test_no_bz2(self): + c = sys.modules.pop('kombu.compression') + try: + import kombu.compression + self.assertFalse(hasattr(kombu.compression, 'bz2')) + finally: + if c is not None: + sys.modules['kombu.compression'] = c + + def test_encoders(self): + encoders = compression.encoders() + self.assertIn('application/x-gzip', encoders) + if self.has_bzip2: + self.assertIn('application/x-bz2', encoders) + + def test_compress__decompress__zlib(self): + text = b'The Quick Brown Fox Jumps Over The Lazy Dog' + c, ctype = compression.compress(text, 'zlib') + self.assertNotEqual(text, c) + d = compression.decompress(c, ctype) + self.assertEqual(d, text) + + def test_compress__decompress__bzip2(self): + if not self.has_bzip2: + raise SkipTest('bzip2 not available') + text = b'The Brown Quick Fox Over The Lazy Dog Jumps' + c, ctype = compression.compress(text, 'bzip2') + self.assertNotEqual(text, c) + d = compression.decompress(c, ctype) + self.assertEqual(d, text) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/test_connection.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_connection.py new file mode 100644 index 0000000..2fa6097 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_connection.py @@ -0,0 +1,693 @@ +from __future__ import absolute_import + +import pickle +import socket + +from copy import copy, deepcopy + +from kombu import Connection, Consumer, Producer, parse_url +from kombu.connection import Resource +from kombu.five import items, range + +from .case import Case, Mock, SkipTest, patch, skip_if_not_module +from .mocks import Transport + + +class test_connection_utils(Case): + + def setUp(self): + self.url = 'amqp://user:pass@localhost:5672/my/vhost' + self.nopass = 'amqp://user:**@localhost:5672/my/vhost' + self.expected = { + 'transport': 'amqp', + 'userid': 'user', + 'password': 'pass', + 'hostname': 'localhost', + 'port': 5672, + 'virtual_host': 'my/vhost', + } + + def test_parse_url(self): + result = parse_url(self.url) + self.assertDictEqual(result, self.expected) + + def test_parse_generated_as_uri(self): + conn = Connection(self.url) + info = conn.info() + for k, v in self.expected.items(): + self.assertEqual(info[k], v) + # by default almost the same- no password + self.assertEqual(conn.as_uri(), self.nopass) + self.assertEqual(conn.as_uri(include_password=True), self.url) + + def test_as_uri_when_prefix(self): + conn = Connection('redis+socket:///var/spool/x/y/z/redis.sock') + self.assertEqual( + conn.as_uri(), 
'redis+socket:///var/spool/x/y/z/redis.sock', + ) + + @skip_if_not_module('pymongo') + def test_as_uri_when_mongodb(self): + x = Connection('mongodb://localhost') + self.assertTrue(x.as_uri()) + + def test_bogus_scheme(self): + with self.assertRaises(KeyError): + Connection('bogus://localhost:7421').transport + + def assert_info(self, conn, **fields): + info = conn.info() + for field, expected in items(fields): + self.assertEqual(info[field], expected) + + def test_rabbitmq_example_urls(self): + # see Appendix A of http://www.rabbitmq.com/uri-spec.html + + self.assert_info( + Connection('amqp://user:pass@host:10000/vhost'), + userid='user', password='pass', hostname='host', + port=10000, virtual_host='vhost', + ) + + self.assert_info( + Connection('amqp://user%61:%61pass@ho%61st:10000/v%2fhost'), + userid='usera', password='apass', hostname='hoast', + port=10000, virtual_host='v/host', + ) + + self.assert_info( + Connection('amqp://'), + userid='guest', password='guest', hostname='localhost', + port=5672, virtual_host='/', + ) + + self.assert_info( + Connection('amqp://:@/'), + userid='guest', password='guest', hostname='localhost', + port=5672, virtual_host='/', + ) + + self.assert_info( + Connection('amqp://user@/'), + userid='user', password='guest', hostname='localhost', + port=5672, virtual_host='/', + ) + + self.assert_info( + Connection('amqp://user:pass@/'), + userid='user', password='pass', hostname='localhost', + port=5672, virtual_host='/', + ) + + self.assert_info( + Connection('amqp://host'), + userid='guest', password='guest', hostname='host', + port=5672, virtual_host='/', + ) + + self.assert_info( + Connection('amqp://:10000'), + userid='guest', password='guest', hostname='localhost', + port=10000, virtual_host='/', + ) + + self.assert_info( + Connection('amqp:///vhost'), + userid='guest', password='guest', hostname='localhost', + port=5672, virtual_host='vhost', + ) + + self.assert_info( + Connection('amqp://host/'), + userid='guest', password='guest', hostname='host', + port=5672, virtual_host='/', + ) + + self.assert_info( + Connection('amqp://host/%2f'), + userid='guest', password='guest', hostname='host', + port=5672, virtual_host='/', + ) + + def test_url_IPV6(self): + raise SkipTest("urllib can't parse ipv6 urls") + + self.assert_info( + Connection('amqp://[::1]'), + userid='guest', password='guest', hostname='[::1]', + port=5672, virtual_host='/', + ) + + def test_connection_copy(self): + conn = Connection(self.url, alternates=['amqp://host']) + clone = deepcopy(conn) + self.assertEqual(clone.alt, ['amqp://host']) + + +class test_Connection(Case): + + def setUp(self): + self.conn = Connection(port=5672, transport=Transport) + + def test_establish_connection(self): + conn = self.conn + conn.connect() + self.assertTrue(conn.connection.connected) + self.assertEqual(conn.host, 'localhost:5672') + channel = conn.channel() + self.assertTrue(channel.open) + self.assertEqual(conn.drain_events(), 'event') + _connection = conn.connection + conn.close() + self.assertFalse(_connection.connected) + self.assertIsInstance(conn.transport, Transport) + + def test_multiple_urls(self): + conn1 = Connection('amqp://foo;amqp://bar') + self.assertEqual(conn1.hostname, 'foo') + self.assertListEqual(conn1.alt, ['amqp://foo', 'amqp://bar']) + + conn2 = Connection(['amqp://foo', 'amqp://bar']) + self.assertEqual(conn2.hostname, 'foo') + self.assertListEqual(conn2.alt, ['amqp://foo', 'amqp://bar']) + + def test_collect(self): + connection = Connection('memory://') + trans = 
connection._transport = Mock(name='transport') + _collect = trans._collect = Mock(name='transport._collect') + _close = connection._close = Mock(name='connection._close') + connection.declared_entities = Mock(name='decl_entities') + uconn = connection._connection = Mock(name='_connection') + connection.collect() + + self.assertFalse(_close.called) + _collect.assert_called_with(uconn) + connection.declared_entities.clear.assert_called_with() + self.assertIsNone(trans.client) + self.assertIsNone(connection._transport) + self.assertIsNone(connection._connection) + + def test_collect_no_transport(self): + connection = Connection('memory://') + connection._transport = None + connection._close = Mock() + connection.collect() + connection._close.assert_called_with() + + connection._close.side_effect = socket.timeout() + connection.collect() + + def test_collect_transport_gone(self): + connection = Connection('memory://') + uconn = connection._connection = Mock(name='conn._conn') + trans = connection._transport = Mock(name='transport') + collect = trans._collect = Mock(name='transport._collect') + + def se(conn): + connection._transport = None + collect.side_effect = se + + connection.collect() + collect.assert_called_with(uconn) + self.assertIsNone(connection._transport) + + def test_uri_passthrough(self): + transport = Mock(name='transport') + with patch('kombu.connection.get_transport_cls') as gtc: + gtc.return_value = transport + transport.can_parse_url = True + with patch('kombu.connection.parse_url') as parse_url: + c = Connection('foo+mysql://some_host') + self.assertEqual(c.transport_cls, 'foo') + self.assertFalse(parse_url.called) + self.assertEqual(c.hostname, 'mysql://some_host') + self.assertTrue(c.as_uri().startswith('foo+')) + with patch('kombu.connection.parse_url') as parse_url: + c = Connection('mysql://some_host', transport='foo') + self.assertEqual(c.transport_cls, 'foo') + self.assertFalse(parse_url.called) + self.assertEqual(c.hostname, 'mysql://some_host') + c = Connection('pyamqp+sqlite://some_host') + self.assertTrue(c.as_uri().startswith('pyamqp+')) + + def test_default_ensure_callback(self): + with patch('kombu.connection.logger') as logger: + c = Connection(transport=Mock) + c._default_ensure_callback(KeyError(), 3) + self.assertTrue(logger.error.called) + + def test_ensure_connection_on_error(self): + c = Connection('amqp://A;amqp://B') + with patch('kombu.connection.retry_over_time') as rot: + c.ensure_connection() + self.assertTrue(rot.called) + + args = rot.call_args[0] + cb = args[4] + intervals = iter([1, 2, 3, 4, 5]) + self.assertEqual(cb(KeyError(), intervals, 0), 0) + self.assertEqual(cb(KeyError(), intervals, 1), 1) + self.assertEqual(cb(KeyError(), intervals, 2), 0) + self.assertEqual(cb(KeyError(), intervals, 3), 2) + self.assertEqual(cb(KeyError(), intervals, 4), 0) + self.assertEqual(cb(KeyError(), intervals, 5), 3) + self.assertEqual(cb(KeyError(), intervals, 6), 0) + self.assertEqual(cb(KeyError(), intervals, 7), 4) + + errback = Mock() + c.ensure_connection(errback=errback) + args = rot.call_args[0] + cb = args[4] + self.assertEqual(cb(KeyError(), intervals, 0), 0) + self.assertTrue(errback.called) + + def test_supports_heartbeats(self): + c = Connection(transport=Mock) + c.transport.supports_heartbeats = False + self.assertFalse(c.supports_heartbeats) + + def test_is_evented(self): + c = Connection(transport=Mock) + c.transport.supports_ev = False + self.assertFalse(c.is_evented) + + def test_register_with_event_loop(self): + c = 
Connection(transport=Mock) + loop = Mock(name='loop') + c.register_with_event_loop(loop) + c.transport.register_with_event_loop.assert_called_with( + c.connection, loop, + ) + + def test_manager(self): + c = Connection(transport=Mock) + self.assertIs(c.manager, c.transport.manager) + + def test_copy(self): + c = Connection('amqp://example.com') + self.assertEqual(copy(c).info(), c.info()) + + def test_copy_multiples(self): + c = Connection('amqp://A.example.com;amqp://B.example.com') + self.assertTrue(c.alt) + d = copy(c) + self.assertEqual(d.alt, c.alt) + + def test_switch(self): + c = Connection('amqp://foo') + c._closed = True + c.switch('redis://example.com//3') + self.assertFalse(c._closed) + self.assertEqual(c.hostname, 'example.com') + self.assertEqual(c.transport_cls, 'redis') + self.assertEqual(c.virtual_host, '/3') + + def test_maybe_switch_next(self): + c = Connection('amqp://foo;redis://example.com//3') + c.maybe_switch_next() + self.assertFalse(c._closed) + self.assertEqual(c.hostname, 'example.com') + self.assertEqual(c.transport_cls, 'redis') + self.assertEqual(c.virtual_host, '/3') + + def test_maybe_switch_next_no_cycle(self): + c = Connection('amqp://foo') + c.maybe_switch_next() + self.assertFalse(c._closed) + self.assertEqual(c.hostname, 'foo') + self.assertIn(c.transport_cls, ('librabbitmq', 'pyamqp', 'amqp')) + + def test_heartbeat_check(self): + c = Connection(transport=Transport) + c.transport.heartbeat_check = Mock() + c.heartbeat_check(3) + c.transport.heartbeat_check.assert_called_with(c.connection, rate=3) + + def test_completes_cycle_no_cycle(self): + c = Connection('amqp://') + self.assertTrue(c.completes_cycle(0)) + self.assertTrue(c.completes_cycle(1)) + + def test_completes_cycle(self): + c = Connection('amqp://a;amqp://b;amqp://c') + self.assertFalse(c.completes_cycle(0)) + self.assertFalse(c.completes_cycle(1)) + self.assertTrue(c.completes_cycle(2)) + + def test__enter____exit__(self): + conn = self.conn + context = conn.__enter__() + self.assertIs(context, conn) + conn.connect() + self.assertTrue(conn.connection.connected) + conn.__exit__() + self.assertIsNone(conn.connection) + conn.close() # again + + def test_close_survives_connerror(self): + + class _CustomError(Exception): + pass + + class MyTransport(Transport): + connection_errors = (_CustomError, ) + + def close_connection(self, connection): + raise _CustomError('foo') + + conn = Connection(transport=MyTransport) + conn.connect() + conn.close() + self.assertTrue(conn._closed) + + def test_close_when_default_channel(self): + conn = self.conn + conn._default_channel = Mock() + conn._close() + conn._default_channel.close.assert_called_with() + + def test_close_when_default_channel_close_raises(self): + + class Conn(Connection): + + @property + def connection_errors(self): + return (KeyError, ) + + conn = Conn('memory://') + conn._default_channel = Mock() + conn._default_channel.close.side_effect = KeyError() + + conn._close() + conn._default_channel.close.assert_called_with() + + def test_revive_when_default_channel(self): + conn = self.conn + defchan = conn._default_channel = Mock() + conn.revive(Mock()) + + defchan.close.assert_called_with() + self.assertIsNone(conn._default_channel) + + def test_ensure_connection(self): + self.assertTrue(self.conn.ensure_connection()) + + def test_ensure_success(self): + def publish(): + return 'foobar' + + ensured = self.conn.ensure(None, publish) + self.assertEqual(ensured(), 'foobar') + + def test_ensure_failure(self): + class _CustomError(Exception): + pass 
+ + def publish(): + raise _CustomError('bar') + + ensured = self.conn.ensure(None, publish) + with self.assertRaises(_CustomError): + ensured() + + def test_ensure_connection_failure(self): + class _ConnectionError(Exception): + pass + + def publish(): + raise _ConnectionError('failed connection') + + self.conn.transport.connection_errors = (_ConnectionError,) + ensured = self.conn.ensure(self.conn, publish) + with self.assertRaises(_ConnectionError): + ensured() + + def test_autoretry(self): + myfun = Mock() + + self.conn.transport.connection_errors = (KeyError, ) + + def on_call(*args, **kwargs): + myfun.side_effect = None + raise KeyError('foo') + + myfun.side_effect = on_call + insured = self.conn.autoretry(myfun) + insured() + + self.assertTrue(myfun.called) + + def test_SimpleQueue(self): + conn = self.conn + q = conn.SimpleQueue('foo') + self.assertIs(q.channel, conn.default_channel) + chan = conn.channel() + q2 = conn.SimpleQueue('foo', channel=chan) + self.assertIs(q2.channel, chan) + + def test_SimpleBuffer(self): + conn = self.conn + q = conn.SimpleBuffer('foo') + self.assertIs(q.channel, conn.default_channel) + chan = conn.channel() + q2 = conn.SimpleBuffer('foo', channel=chan) + self.assertIs(q2.channel, chan) + + def test_Producer(self): + conn = self.conn + self.assertIsInstance(conn.Producer(), Producer) + self.assertIsInstance(conn.Producer(conn.default_channel), Producer) + + def test_Consumer(self): + conn = self.conn + self.assertIsInstance(conn.Consumer(queues=[]), Consumer) + self.assertIsInstance(conn.Consumer(queues=[], + channel=conn.default_channel), Consumer) + + def test__repr__(self): + self.assertTrue(repr(self.conn)) + + def test__reduce__(self): + x = pickle.loads(pickle.dumps(self.conn)) + self.assertDictEqual(x.info(), self.conn.info()) + + def test_channel_errors(self): + + class MyTransport(Transport): + channel_errors = (KeyError, ValueError) + + conn = Connection(transport=MyTransport) + self.assertTupleEqual(conn.channel_errors, (KeyError, ValueError)) + + def test_connection_errors(self): + + class MyTransport(Transport): + connection_errors = (KeyError, ValueError) + + conn = Connection(transport=MyTransport) + self.assertTupleEqual(conn.connection_errors, (KeyError, ValueError)) + + +class test_Connection_with_transport_options(Case): + + transport_options = {'pool_recycler': 3600, 'echo': True} + + def setUp(self): + self.conn = Connection(port=5672, transport=Transport, + transport_options=self.transport_options) + + def test_establish_connection(self): + conn = self.conn + self.assertEqual(conn.transport_options, self.transport_options) + + +class xResource(Resource): + + def setup(self): + pass + + +class ResourceCase(Case): + abstract = True + + def create_resource(self, limit, preload): + raise NotImplementedError('subclass responsibility') + + def assertState(self, P, avail, dirty): + self.assertEqual(P._resource.qsize(), avail) + self.assertEqual(len(P._dirty), dirty) + + def test_setup(self): + if self.abstract: + with self.assertRaises(NotImplementedError): + Resource() + + def test_acquire__release(self): + if self.abstract: + return + P = self.create_resource(10, 0) + self.assertState(P, 10, 0) + chans = [P.acquire() for _ in range(10)] + self.assertState(P, 0, 10) + with self.assertRaises(P.LimitExceeded): + P.acquire() + chans.pop().release() + self.assertState(P, 1, 9) + [chan.release() for chan in chans] + self.assertState(P, 10, 0) + + def test_acquire_prepare_raises(self): + if self.abstract: + return + P = 
self.create_resource(10, 0) + + self.assertEqual(len(P._resource.queue), 10) + P.prepare = Mock() + P.prepare.side_effect = IOError() + with self.assertRaises(IOError): + P.acquire(block=True) + self.assertEqual(len(P._resource.queue), 10) + + def test_acquire_no_limit(self): + if self.abstract: + return + P = self.create_resource(None, 0) + P.acquire().release() + + def test_replace_when_limit(self): + if self.abstract: + return + P = self.create_resource(10, 0) + r = P.acquire() + P._dirty = Mock() + P.close_resource = Mock() + + P.replace(r) + P._dirty.discard.assert_called_with(r) + P.close_resource.assert_called_with(r) + + def test_replace_no_limit(self): + if self.abstract: + return + P = self.create_resource(None, 0) + r = P.acquire() + P._dirty = Mock() + P.close_resource = Mock() + + P.replace(r) + self.assertFalse(P._dirty.discard.called) + P.close_resource.assert_called_with(r) + + def test_interface_prepare(self): + if not self.abstract: + return + x = xResource() + self.assertEqual(x.prepare(10), 10) + + def test_force_close_all_handles_AttributeError(self): + if self.abstract: + return + P = self.create_resource(10, 10) + cr = P.collect_resource = Mock() + cr.side_effect = AttributeError('x') + + P.acquire() + self.assertTrue(P._dirty) + + P.force_close_all() + + def test_force_close_all_no_mutex(self): + if self.abstract: + return + P = self.create_resource(10, 10) + P.close_resource = Mock() + + m = P._resource = Mock() + m.mutex = None + m.queue.pop.side_effect = IndexError + + P.force_close_all() + + def test_add_when_empty(self): + if self.abstract: + return + P = self.create_resource(None, None) + P._resource.queue[:] = [] + self.assertFalse(P._resource.queue) + P._add_when_empty() + self.assertTrue(P._resource.queue) + + +class test_ConnectionPool(ResourceCase): + abstract = False + + def create_resource(self, limit, preload): + return Connection(port=5672, transport=Transport).Pool(limit, preload) + + def test_setup(self): + P = self.create_resource(10, 2) + q = P._resource.queue + self.assertIsNotNone(q[0]._connection) + self.assertIsNotNone(q[1]._connection) + self.assertIsNone(q[2]()._connection) + + def test_acquire_raises_evaluated(self): + P = self.create_resource(1, 0) + # evaluate the connection first + r = P.acquire() + r.release() + P.prepare = Mock() + P.prepare.side_effect = MemoryError() + P.release = Mock() + with self.assertRaises(MemoryError): + with P.acquire(): + assert False + P.release.assert_called_with(r) + + def test_release_no__debug(self): + P = self.create_resource(10, 2) + R = Mock() + R._debug.side_effect = AttributeError() + P.release_resource(R) + + def test_setup_no_limit(self): + P = self.create_resource(None, None) + self.assertFalse(P._resource.queue) + self.assertIsNone(P.limit) + + def test_prepare_not_callable(self): + P = self.create_resource(None, None) + conn = Connection('memory://') + self.assertIs(P.prepare(conn), conn) + + def test_acquire_channel(self): + P = self.create_resource(10, 0) + with P.acquire_channel() as (conn, channel): + self.assertIs(channel, conn.default_channel) + + +class test_ChannelPool(ResourceCase): + abstract = False + + def create_resource(self, limit, preload): + return Connection(port=5672, transport=Transport) \ + .ChannelPool(limit, preload) + + def test_setup(self): + P = self.create_resource(10, 2) + q = P._resource.queue + self.assertTrue(q[0].basic_consume) + self.assertTrue(q[1].basic_consume) + with self.assertRaises(AttributeError): + getattr(q[2], 'basic_consume') + + def 
test_setup_no_limit(self): + P = self.create_resource(None, None) + self.assertFalse(P._resource.queue) + self.assertIsNone(P.limit) + + def test_prepare_not_callable(self): + P = self.create_resource(10, 0) + conn = Connection('memory://') + chan = conn.default_channel + self.assertIs(P.prepare(chan), chan) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/test_entities.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_entities.py new file mode 100644 index 0000000..21deebd --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_entities.py @@ -0,0 +1,374 @@ +from __future__ import absolute_import + +import pickle + +from kombu import Connection, Exchange, Producer, Queue, binding +from kombu.exceptions import NotBoundError +from kombu.serialization import registry + +from .case import Case, Mock, call +from .mocks import Transport + + +def get_conn(): + return Connection(transport=Transport) + + +class test_binding(Case): + + def test_constructor(self): + x = binding( + Exchange('foo'), 'rkey', + arguments={'barg': 'bval'}, + unbind_arguments={'uarg': 'uval'}, + ) + self.assertEqual(x.exchange, Exchange('foo')) + self.assertEqual(x.routing_key, 'rkey') + self.assertDictEqual(x.arguments, {'barg': 'bval'}) + self.assertDictEqual(x.unbind_arguments, {'uarg': 'uval'}) + + def test_declare(self): + chan = get_conn().channel() + x = binding(Exchange('foo'), 'rkey') + x.declare(chan) + self.assertIn('exchange_declare', chan) + + def test_declare_no_exchange(self): + chan = get_conn().channel() + x = binding() + x.declare(chan) + self.assertNotIn('exchange_declare', chan) + + def test_bind(self): + chan = get_conn().channel() + x = binding(Exchange('foo')) + x.bind(Exchange('bar')(chan)) + self.assertIn('exchange_bind', chan) + + def test_unbind(self): + chan = get_conn().channel() + x = binding(Exchange('foo')) + x.unbind(Exchange('bar')(chan)) + self.assertIn('exchange_unbind', chan) + + def test_repr(self): + b = binding(Exchange('foo'), 'rkey') + self.assertIn('foo', repr(b)) + self.assertIn('rkey', repr(b)) + + +class test_Exchange(Case): + + def test_bound(self): + exchange = Exchange('foo', 'direct') + self.assertFalse(exchange.is_bound) + self.assertIn('= 1: + self.c.should_stop = True + counter[0] += 1 + return counter + self.c.should_stop = False + consume.side_effect = se + self.c.run() + self.assertTrue(sleep.called) + + def test_run_raises(self): + conn = ContextMock(name='connection') + self.c.connection = conn + conn.connection_errors = (KeyError, ) + conn.channel_errors = () + consume = self.c.consume = Mock(name='c.consume') + + with patch('kombu.mixins.warn') as warn: + def se_raises(*args, **kwargs): + self.c.should_stop = True + raise KeyError('foo') + self.c.should_stop = False + consume.side_effect = se_raises + self.c.run() + self.assertTrue(warn.called) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/test_pidbox.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_pidbox.py new file mode 100644 index 0000000..357de65 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_pidbox.py @@ -0,0 +1,287 @@ +from __future__ import absolute_import + +import socket +import warnings + +from kombu import Connection +from kombu import pidbox +from kombu.exceptions import ContentDisallowed, InconsistencyError +from kombu.utils import uuid + +from .case import Case, Mock, patch + + +class test_Mailbox(Case): + + def _handler(self, state): + return self.stats['var'] + + def setUp(self): + + class 
Mailbox(pidbox.Mailbox): + + def _collect(self, *args, **kwargs): + return 'COLLECTED' + + self.mailbox = Mailbox('test_pidbox') + self.connection = Connection(transport='memory') + self.state = {'var': 1} + self.handlers = {'mymethod': self._handler} + self.bound = self.mailbox(self.connection) + self.default_chan = self.connection.channel() + self.node = self.bound.Node( + 'test_pidbox', + state=self.state, handlers=self.handlers, + channel=self.default_chan, + ) + + def test_publish_reply_ignores_InconsistencyError(self): + mailbox = pidbox.Mailbox('test_reply__collect')(self.connection) + with patch('kombu.pidbox.Producer') as Producer: + producer = Producer.return_value = Mock(name='producer') + producer.publish.side_effect = InconsistencyError() + mailbox._publish_reply( + {'foo': 'bar'}, mailbox.reply_exchange, mailbox.oid, 'foo', + ) + self.assertTrue(producer.publish.called) + + def test_reply__collect(self): + mailbox = pidbox.Mailbox('test_reply__collect')(self.connection) + exchange = mailbox.reply_exchange.name + channel = self.connection.channel() + mailbox.reply_queue(channel).declare() + + ticket = uuid() + mailbox._publish_reply({'foo': 'bar'}, exchange, mailbox.oid, ticket) + _callback_called = [False] + + def callback(body): + _callback_called[0] = True + + reply = mailbox._collect(ticket, limit=1, + callback=callback, channel=channel) + self.assertEqual(reply, [{'foo': 'bar'}]) + self.assertTrue(_callback_called[0]) + + ticket = uuid() + mailbox._publish_reply({'biz': 'boz'}, exchange, mailbox.oid, ticket) + reply = mailbox._collect(ticket, limit=1, channel=channel) + self.assertEqual(reply, [{'biz': 'boz'}]) + + mailbox._publish_reply({'foo': 'BAM'}, exchange, mailbox.oid, 'doom', + serializer='pickle') + with self.assertRaises(ContentDisallowed): + reply = mailbox._collect('doom', limit=1, channel=channel) + mailbox._publish_reply( + {'foo': 'BAMBAM'}, exchange, mailbox.oid, 'doom', + serializer='pickle', + ) + reply = mailbox._collect('doom', limit=1, channel=channel, + accept=['pickle']) + self.assertEqual(reply[0]['foo'], 'BAMBAM') + + de = mailbox.connection.drain_events = Mock() + de.side_effect = socket.timeout + mailbox._collect(ticket, limit=1, channel=channel) + + def test_constructor(self): + self.assertIsNone(self.mailbox.connection) + self.assertTrue(self.mailbox.exchange.name) + self.assertTrue(self.mailbox.reply_exchange.name) + + def test_bound(self): + bound = self.mailbox(self.connection) + self.assertIs(bound.connection, self.connection) + + def test_Node(self): + self.assertTrue(self.node.hostname) + self.assertTrue(self.node.state) + self.assertIs(self.node.mailbox, self.bound) + self.assertTrue(self.handlers) + + # No initial handlers + node2 = self.bound.Node('test_pidbox2', state=self.state) + self.assertDictEqual(node2.handlers, {}) + + def test_Node_consumer(self): + consumer1 = self.node.Consumer() + self.assertIs(consumer1.channel, self.default_chan) + self.assertTrue(consumer1.no_ack) + + chan2 = self.connection.channel() + consumer2 = self.node.Consumer(channel=chan2, no_ack=False) + self.assertIs(consumer2.channel, chan2) + self.assertFalse(consumer2.no_ack) + + def test_Node_consumer_multiple_listeners(self): + warnings.resetwarnings() + consumer = self.node.Consumer() + q = consumer.queues[0] + with warnings.catch_warnings(record=True) as log: + q.on_declared('foo', 1, 1) + self.assertTrue(log) + self.assertIn('already using this', log[0].message.args[0]) + + with warnings.catch_warnings(record=True) as log: + q.on_declared('foo', 1, 0) 
+ self.assertFalse(log) + + def test_handler(self): + node = self.bound.Node('test_handler', state=self.state) + + @node.handler + def my_handler_name(state): + return 42 + + self.assertIn('my_handler_name', node.handlers) + + def test_dispatch(self): + node = self.bound.Node('test_dispatch', state=self.state) + + @node.handler + def my_handler_name(state, x=None, y=None): + return x + y + + self.assertEqual(node.dispatch('my_handler_name', + arguments={'x': 10, 'y': 10}), 20) + + def test_dispatch_raising_SystemExit(self): + node = self.bound.Node('test_dispatch_raising_SystemExit', + state=self.state) + + @node.handler + def my_handler_name(state): + raise SystemExit + + with self.assertRaises(SystemExit): + node.dispatch('my_handler_name') + + def test_dispatch_raising(self): + node = self.bound.Node('test_dispatch_raising', state=self.state) + + @node.handler + def my_handler_name(state): + raise KeyError('foo') + + res = node.dispatch('my_handler_name') + self.assertIn('error', res) + self.assertIn('KeyError', res['error']) + + def test_dispatch_replies(self): + _replied = [False] + + def reply(data, **options): + _replied[0] = True + + node = self.bound.Node('test_dispatch', state=self.state) + node.reply = reply + + @node.handler + def my_handler_name(state, x=None, y=None): + return x + y + + node.dispatch('my_handler_name', + arguments={'x': 10, 'y': 10}, + reply_to={'exchange': 'foo', 'routing_key': 'bar'}) + self.assertTrue(_replied[0]) + + def test_reply(self): + _replied = [(None, None, None)] + + def publish_reply(data, exchange, routing_key, ticket, **kwargs): + _replied[0] = (data, exchange, routing_key, ticket) + + mailbox = self.mailbox(self.connection) + mailbox._publish_reply = publish_reply + node = mailbox.Node('test_reply') + + @node.handler + def my_handler_name(state): + return 42 + + node.dispatch('my_handler_name', + reply_to={'exchange': 'exchange', + 'routing_key': 'rkey'}, + ticket='TICKET') + data, exchange, routing_key, ticket = _replied[0] + self.assertEqual(data, {'test_reply': 42}) + self.assertEqual(exchange, 'exchange') + self.assertEqual(routing_key, 'rkey') + self.assertEqual(ticket, 'TICKET') + + def test_handle_message(self): + node = self.bound.Node('test_dispatch_from_message') + + @node.handler + def my_handler_name(state, x=None, y=None): + return x * y + + body = {'method': 'my_handler_name', + 'arguments': {'x': 64, 'y': 64}} + + self.assertEqual(node.handle_message(body, None), 64 * 64) + + # message not for me should not be processed. 
+ body['destination'] = ['some_other_node'] + self.assertIsNone(node.handle_message(body, None)) + + def test_handle_message_adjusts_clock(self): + node = self.bound.Node('test_adjusts_clock') + + @node.handler + def my_handler_name(state): + return 10 + + body = {'method': 'my_handler_name', + 'arguments': {}} + message = Mock(name='message') + message.headers = {'clock': 313} + node.adjust_clock = Mock(name='adjust_clock') + res = node.handle_message(body, message) + node.adjust_clock.assert_called_with(313) + self.assertEqual(res, 10) + + def test_listen(self): + consumer = self.node.listen() + self.assertEqual(consumer.callbacks[0], + self.node.handle_message) + self.assertEqual(consumer.channel, self.default_chan) + + def test_cast(self): + self.bound.cast(['somenode'], 'mymethod') + consumer = self.node.Consumer() + self.assertIsCast(self.get_next(consumer)) + + def test_abcast(self): + self.bound.abcast('mymethod') + consumer = self.node.Consumer() + self.assertIsCast(self.get_next(consumer)) + + def test_call_destination_must_be_sequence(self): + with self.assertRaises(ValueError): + self.bound.call('some_node', 'mymethod') + + def test_call(self): + self.assertEqual( + self.bound.call(['some_node'], 'mymethod'), + 'COLLECTED', + ) + consumer = self.node.Consumer() + self.assertIsCall(self.get_next(consumer)) + + def test_multi_call(self): + self.assertEqual(self.bound.multi_call('mymethod'), 'COLLECTED') + consumer = self.node.Consumer() + self.assertIsCall(self.get_next(consumer)) + + def get_next(self, consumer): + m = consumer.queues[0].get() + if m: + return m.payload + + def assertIsCast(self, message): + self.assertTrue(message['method']) + + def assertIsCall(self, message): + self.assertTrue(message['method']) + self.assertTrue(message['reply_to']) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/test_pools.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_pools.py new file mode 100644 index 0000000..920c65a --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_pools.py @@ -0,0 +1,239 @@ +from __future__ import absolute_import + +from kombu import Connection, Producer +from kombu import pools +from kombu.connection import ConnectionPool +from kombu.utils import eqhash + +from .case import Case, Mock + + +class test_ProducerPool(Case): + Pool = pools.ProducerPool + + class MyPool(pools.ProducerPool): + + def __init__(self, *args, **kwargs): + self.instance = Mock() + pools.ProducerPool.__init__(self, *args, **kwargs) + + def Producer(self, connection): + return self.instance + + def setUp(self): + self.connections = Mock() + self.pool = self.Pool(self.connections, limit=10) + + def test_close_resource(self): + self.pool.close_resource(Mock(name='resource')) + + def test_releases_connection_when_Producer_raises(self): + self.pool.Producer = Mock() + self.pool.Producer.side_effect = IOError() + acq = self.pool._acquire_connection = Mock() + conn = acq.return_value = Mock() + with self.assertRaises(IOError): + self.pool.create_producer() + conn.release.assert_called_with() + + def test_prepare_release_connection_on_error(self): + pp = Mock() + p = pp.return_value = Mock() + p.revive.side_effect = IOError() + acq = self.pool._acquire_connection = Mock() + conn = acq.return_value = Mock() + p._channel = None + with self.assertRaises(IOError): + self.pool.prepare(pp) + conn.release.assert_called_with() + + def test_release_releases_connection(self): + p = Mock() + p.__connection__ = Mock() + self.pool.release(p) + 
p.__connection__.release.assert_called_with() + p.__connection__ = None + self.pool.release(p) + + def test_init(self): + self.assertIs(self.pool.connections, self.connections) + + def test_Producer(self): + self.assertIsInstance(self.pool.Producer(Mock()), Producer) + + def test_acquire_connection(self): + self.pool._acquire_connection() + self.connections.acquire.assert_called_with(block=True) + + def test_new(self): + promise = self.pool.new() + producer = promise() + self.assertIsInstance(producer, Producer) + self.connections.acquire.assert_called_with(block=True) + + def test_setup_unlimited(self): + pool = self.Pool(self.connections, limit=None) + pool.setup() + self.assertFalse(pool._resource.queue) + + def test_setup(self): + self.assertEqual(len(self.pool._resource.queue), self.pool.limit) + + first = self.pool._resource.get_nowait() + producer = first() + self.assertIsInstance(producer, Producer) + + def test_prepare(self): + connection = self.connections.acquire.return_value = Mock() + pool = self.MyPool(self.connections, limit=10) + pool.instance._channel = None + first = pool._resource.get_nowait() + producer = pool.prepare(first) + self.assertTrue(self.connections.acquire.called) + producer.revive.assert_called_with(connection) + + def test_prepare_channel_already_created(self): + self.connections.acquire.return_value = Mock() + pool = self.MyPool(self.connections, limit=10) + pool.instance._channel = Mock() + first = pool._resource.get_nowait() + self.connections.acquire.reset() + producer = pool.prepare(first) + self.assertFalse(producer.revive.called) + + def test_prepare_not_callable(self): + x = Producer(Mock) + self.pool.prepare(x) + + def test_release(self): + p = Mock() + p.channel = Mock() + p.__connection__ = Mock() + self.pool.release(p) + p.__connection__.release.assert_called_with() + self.assertIsNone(p.channel) + + +class test_PoolGroup(Case): + Group = pools.PoolGroup + + class MyGroup(pools.PoolGroup): + + def create(self, resource, limit): + return resource, limit + + def test_interface_create(self): + g = self.Group() + with self.assertRaises(NotImplementedError): + g.create(Mock(), 10) + + def test_getitem_using_global_limit(self): + pools._used[0] = False + g = self.MyGroup(limit=pools.use_global_limit) + res = g['foo'] + self.assertTupleEqual(res, ('foo', pools.get_limit())) + self.assertTrue(pools._used[0]) + + def test_getitem_using_custom_limit(self): + pools._used[0] = True + g = self.MyGroup(limit=102456) + res = g['foo'] + self.assertTupleEqual(res, ('foo', 102456)) + + def test_delitem(self): + g = self.MyGroup() + g['foo'] + del(g['foo']) + self.assertNotIn('foo', g) + + def test_Connections(self): + conn = Connection('memory://') + p = pools.connections[conn] + self.assertTrue(p) + self.assertIsInstance(p, ConnectionPool) + self.assertIs(p.connection, conn) + self.assertEqual(p.limit, pools.get_limit()) + + def test_Producers(self): + conn = Connection('memory://') + p = pools.producers[conn] + self.assertTrue(p) + self.assertIsInstance(p, pools.ProducerPool) + self.assertIs(p.connections, pools.connections[conn]) + self.assertEqual(p.limit, p.connections.limit) + self.assertEqual(p.limit, pools.get_limit()) + + def test_all_groups(self): + conn = Connection('memory://') + pools.connections[conn] + + self.assertTrue(list(pools._all_pools())) + + def test_reset(self): + pools.reset() + + class MyGroup(dict): + clear_called = False + + def clear(self): + self.clear_called = True + + p1 = pools.connections['foo'] = Mock() + g1 = MyGroup() + 
pools._groups.append(g1) + + pools.reset() + p1.force_close_all.assert_called_with() + self.assertTrue(g1.clear_called) + + p1 = pools.connections['foo'] = Mock() + p1.force_close_all.side_effect = KeyError() + pools.reset() + + def test_set_limit(self): + pools.reset() + pools.set_limit(34576) + limit = pools.get_limit() + self.assertEqual(limit, 34576) + + pools.connections[Connection('memory://')] + pools.set_limit(limit + 1) + self.assertEqual(pools.get_limit(), limit + 1) + limit = pools.get_limit() + with self.assertRaises(RuntimeError): + pools.set_limit(limit - 1) + pools.set_limit(limit - 1, force=True) + self.assertEqual(pools.get_limit(), limit - 1) + + pools.set_limit(pools.get_limit()) + + +class test_fun_PoolGroup(Case): + + def test_connections_behavior(self): + c1u = 'memory://localhost:123' + c2u = 'memory://localhost:124' + c1 = Connection(c1u) + c2 = Connection(c2u) + c3 = Connection(c1u) + + assert eqhash(c1) != eqhash(c2) + assert eqhash(c1) == eqhash(c3) + + c4 = Connection(c1u, transport_options={'confirm_publish': True}) + self.assertNotEqual(eqhash(c3), eqhash(c4)) + + p1 = pools.connections[c1] + p2 = pools.connections[c2] + p3 = pools.connections[c3] + + self.assertIsNot(p1, p2) + self.assertIs(p1, p3) + + r1 = p1.acquire() + self.assertTrue(p1._dirty) + self.assertTrue(p3._dirty) + self.assertFalse(p2._dirty) + r1.release() + self.assertFalse(p1._dirty) + self.assertFalse(p3._dirty) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/test_serialization.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_serialization.py new file mode 100644 index 0000000..142058f --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_serialization.py @@ -0,0 +1,347 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +from __future__ import absolute_import, unicode_literals + +import sys + +from base64 import b64decode + +from kombu.exceptions import ContentDisallowed, EncodeError, DecodeError +from kombu.five import text_t, bytes_t +from kombu.serialization import ( + registry, register, SerializerNotInstalled, + raw_encode, register_yaml, register_msgpack, + dumps, loads, pickle, pickle_protocol, + unregister, register_pickle, enable_insecure_serializers, + disable_insecure_serializers, +) +from kombu.utils.encoding import str_to_bytes + +from .case import Case, call, mask_modules, patch, skip_if_not_module + +# For content_encoding tests +unicode_string = 'abcdé\u8463' +unicode_string_as_utf8 = unicode_string.encode('utf-8') +latin_string = 'abcdé' +latin_string_as_latin1 = latin_string.encode('latin-1') +latin_string_as_utf8 = latin_string.encode('utf-8') + + +# For serialization tests +py_data = { + 'string': 'The quick brown fox jumps over the lazy dog', + 'int': 10, + 'float': 3.14159265, + 'unicode': 'Thé quick brown fox jumps over thé lazy dog', + 'list': ['george', 'jerry', 'elaine', 'cosmo'], +} + +# JSON serialization tests +json_data = """\ +{"int": 10, "float": 3.1415926500000002, \ +"list": ["george", "jerry", "elaine", "cosmo"], \ +"string": "The quick brown fox jumps over the lazy \ +dog", "unicode": "Th\\u00e9 quick brown fox jumps over \ +th\\u00e9 lazy dog"}\ +""" + +# Pickle serialization tests +pickle_data = pickle.dumps(py_data, protocol=pickle_protocol) + +# YAML serialization tests +yaml_data = """\ +float: 3.1415926500000002 +int: 10 +list: [george, jerry, elaine, cosmo] +string: The quick brown fox jumps over the lazy dog +unicode: "Th\\xE9 quick brown fox jumps over th\\xE9 lazy dog" +""" + + +msgpack_py_data = 
dict(py_data) +msgpack_py_data['unicode'] = 'Th quick brown fox jumps over th lazy dog' +# Unicode chars are lost in transmit :( +msgpack_data = b64decode(str_to_bytes("""\ +haNpbnQKpWZsb2F0y0AJIftTyNTxpGxpc3SUpmdlb3JnZaVqZXJyeaZlbGFpbmWlY29zbW+mc3Rya\ +W5n2gArVGhlIHF1aWNrIGJyb3duIGZveCBqdW1wcyBvdmVyIHRoZSBsYXp5IGRvZ6d1bmljb2Rl2g\ +ApVGggcXVpY2sgYnJvd24gZm94IGp1bXBzIG92ZXIgdGggbGF6eSBkb2c=\ +""")) + + +def say(m): + sys.stderr.write('%s\n' % (m, )) + + +registry.register('testS', lambda s: s, lambda s: 'decoded', + 'application/testS', 'utf-8') + + +class test_Serialization(Case): + + def test_disable(self): + disabled = registry._disabled_content_types + try: + registry.disable('testS') + self.assertIn('application/testS', disabled) + disabled.clear() + + registry.disable('application/testS') + self.assertIn('application/testS', disabled) + finally: + disabled.clear() + + def test_enable(self): + registry._disabled_content_types.add('application/json') + registry.enable('json') + self.assertNotIn('application/json', registry._disabled_content_types) + registry._disabled_content_types.add('application/json') + registry.enable('application/json') + self.assertNotIn('application/json', registry._disabled_content_types) + + def test_loads_when_disabled(self): + disabled = registry._disabled_content_types + try: + registry.disable('testS') + + with self.assertRaises(SerializerNotInstalled): + loads('xxd', 'application/testS', 'utf-8', force=False) + + ret = loads('xxd', 'application/testS', 'utf-8', force=True) + self.assertEqual(ret, 'decoded') + finally: + disabled.clear() + + def test_loads_when_data_is_None(self): + loads(None, 'application/testS', 'utf-8') + + def test_content_type_decoding(self): + self.assertEqual( + unicode_string, + loads(unicode_string_as_utf8, + content_type='plain/text', content_encoding='utf-8'), + ) + self.assertEqual( + latin_string, + loads(latin_string_as_latin1, + content_type='application/data', content_encoding='latin-1'), + ) + + def test_content_type_binary(self): + self.assertIsInstance( + loads(unicode_string_as_utf8, + content_type='application/data', content_encoding='binary'), + bytes_t, + ) + + self.assertEqual( + unicode_string_as_utf8, + loads(unicode_string_as_utf8, + content_type='application/data', content_encoding='binary'), + ) + + def test_content_type_encoding(self): + # Using the 'raw' serializer + self.assertEqual( + unicode_string_as_utf8, + dumps(unicode_string, serializer='raw')[-1], + ) + self.assertEqual( + latin_string_as_utf8, + dumps(latin_string, serializer='raw')[-1], + ) + # And again w/o a specific serializer to check the + # code where we force unicode objects into a string. 
+ self.assertEqual( + unicode_string_as_utf8, + dumps(unicode_string)[-1], + ) + self.assertEqual( + latin_string_as_utf8, + dumps(latin_string)[-1], + ) + + def test_enable_insecure_serializers(self): + with patch('kombu.serialization.registry') as registry: + enable_insecure_serializers() + registry.assert_has_calls([ + call.enable('pickle'), call.enable('yaml'), + call.enable('msgpack'), + ]) + registry.enable.side_effect = KeyError() + enable_insecure_serializers() + + with patch('kombu.serialization.registry') as registry: + enable_insecure_serializers(['msgpack']) + registry.assert_has_calls([call.enable('msgpack')]) + + def test_disable_insecure_serializers(self): + with patch('kombu.serialization.registry') as registry: + registry._decoders = ['pickle', 'yaml', 'doomsday'] + disable_insecure_serializers(allowed=['doomsday']) + registry.disable.assert_has_calls([call('pickle'), call('yaml')]) + registry.enable.assert_has_calls([call('doomsday')]) + disable_insecure_serializers(allowed=None) + registry.disable.assert_has_calls([ + call('pickle'), call('yaml'), call('doomsday') + ]) + + def test_reraises_EncodeError(self): + with self.assertRaises(EncodeError): + dumps([object()], serializer='json') + + def test_reraises_DecodeError(self): + with self.assertRaises(DecodeError): + loads(object(), content_type='application/json', + content_encoding='utf-8') + + def test_json_loads(self): + self.assertEqual( + py_data, + loads(json_data, + content_type='application/json', content_encoding='utf-8'), + ) + + def test_json_dumps(self): + self.assertEqual( + loads( + dumps(py_data, serializer='json')[-1], + content_type='application/json', + content_encoding='utf-8', + ), + loads( + json_data, + content_type='application/json', + content_encoding='utf-8', + ), + ) + + @skip_if_not_module('msgpack', (ImportError, ValueError)) + def test_msgpack_loads(self): + register_msgpack() + res = loads(msgpack_data, + content_type='application/x-msgpack', + content_encoding='binary') + if sys.version_info[0] < 3: + for k, v in res.items(): + if isinstance(v, text_t): + res[k] = v.encode() + if isinstance(v, (list, tuple)): + res[k] = [i.encode() for i in v] + self.assertEqual( + msgpack_py_data, + res, + ) + + @skip_if_not_module('msgpack', (ImportError, ValueError)) + def test_msgpack_dumps(self): + register_msgpack() + self.assertEqual( + loads( + dumps(msgpack_py_data, serializer='msgpack')[-1], + content_type='application/x-msgpack', + content_encoding='binary', + ), + loads( + msgpack_data, + content_type='application/x-msgpack', + content_encoding='binary', + ), + ) + + @skip_if_not_module('yaml') + def test_yaml_loads(self): + register_yaml() + self.assertEqual( + py_data, + loads(yaml_data, + content_type='application/x-yaml', + content_encoding='utf-8'), + ) + + @skip_if_not_module('yaml') + def test_yaml_dumps(self): + register_yaml() + self.assertEqual( + loads( + dumps(py_data, serializer='yaml')[-1], + content_type='application/x-yaml', + content_encoding='utf-8', + ), + loads( + yaml_data, + content_type='application/x-yaml', + content_encoding='utf-8', + ), + ) + + def test_pickle_loads(self): + self.assertEqual( + py_data, + loads(pickle_data, + content_type='application/x-python-serialize', + content_encoding='binary'), + ) + + def test_pickle_dumps(self): + self.assertEqual( + pickle.loads(pickle_data), + pickle.loads(dumps(py_data, serializer='pickle')[-1]), + ) + + def test_register(self): + register(None, None, None, None) + + def test_unregister(self): + with 
self.assertRaises(SerializerNotInstalled): + unregister('nonexisting') + dumps('foo', serializer='pickle') + unregister('pickle') + with self.assertRaises(SerializerNotInstalled): + dumps('foo', serializer='pickle') + register_pickle() + + def test_set_default_serializer_missing(self): + with self.assertRaises(SerializerNotInstalled): + registry._set_default_serializer('nonexisting') + + def test_dumps_missing(self): + with self.assertRaises(SerializerNotInstalled): + dumps('foo', serializer='nonexisting') + + def test_dumps__no_serializer(self): + ctyp, cenc, data = dumps(str_to_bytes('foo')) + self.assertEqual(ctyp, 'application/data') + self.assertEqual(cenc, 'binary') + + def test_loads__trusted_content(self): + loads('tainted', 'application/data', 'binary', accept=[]) + loads('tainted', 'application/text', 'utf-8', accept=[]) + + def test_loads__not_accepted(self): + with self.assertRaises(ContentDisallowed): + loads('tainted', 'application/x-evil', 'binary', accept=[]) + with self.assertRaises(ContentDisallowed): + loads('tainted', 'application/x-evil', 'binary', + accept=['application/x-json']) + self.assertTrue( + loads('tainted', 'application/x-doomsday', 'binary', + accept=['application/x-doomsday']) + ) + + def test_raw_encode(self): + self.assertTupleEqual( + raw_encode('foo'.encode('utf-8')), + ('application/data', 'binary', 'foo'.encode('utf-8')), + ) + + @mask_modules('yaml') + def test_register_yaml__no_yaml(self): + register_yaml() + with self.assertRaises(SerializerNotInstalled): + loads('foo', 'application/x-yaml', 'utf-8') + + @mask_modules('msgpack') + def test_register_msgpack__no_msgpack(self): + register_msgpack() + with self.assertRaises(SerializerNotInstalled): + loads('foo', 'application/x-msgpack', 'utf-8') diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/test_simple.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_simple.py new file mode 100644 index 0000000..53a4ac3 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_simple.py @@ -0,0 +1,136 @@ +from __future__ import absolute_import + +from kombu import Connection, Exchange, Queue + +from .case import Case, Mock + + +class SimpleBase(Case): + abstract = True + + def Queue(self, name, *args, **kwargs): + q = name + if not isinstance(q, Queue): + q = self.__class__.__name__ + if name: + q = '%s.%s' % (q, name) + return self._Queue(q, *args, **kwargs) + + def _Queue(self, *args, **kwargs): + raise NotImplementedError() + + def setUp(self): + if not self.abstract: + self.connection = Connection(transport='memory') + with self.connection.channel() as channel: + channel.exchange_declare('amq.direct') + self.q = self.Queue(None, no_ack=True) + + def tearDown(self): + if not self.abstract: + self.q.close() + self.connection.close() + + def test_produce__consume(self): + if self.abstract: + return + q = self.Queue('test_produce__consume', no_ack=True) + + q.put({'hello': 'Simple'}) + + self.assertEqual(q.get(timeout=1).payload, {'hello': 'Simple'}) + with self.assertRaises(q.Empty): + q.get(timeout=0.1) + + def test_produce__basic_get(self): + if self.abstract: + return + q = self.Queue('test_produce__basic_get', no_ack=True) + q.put({'hello': 'SimpleSync'}) + self.assertEqual(q.get_nowait().payload, {'hello': 'SimpleSync'}) + with self.assertRaises(q.Empty): + q.get_nowait() + + q.put({'hello': 'SimpleSync'}) + self.assertEqual(q.get(block=False).payload, {'hello': 'SimpleSync'}) + with self.assertRaises(q.Empty): + q.get(block=False) + + def test_clear(self): + if 
self.abstract: + return + q = self.Queue('test_clear', no_ack=True) + + for i in range(10): + q.put({'hello': 'SimplePurge%d' % (i, )}) + + self.assertEqual(q.clear(), 10) + + def test_enter_exit(self): + if self.abstract: + return + q = self.Queue('test_enter_exit') + q.close = Mock() + + self.assertIs(q.__enter__(), q) + q.__exit__() + q.close.assert_called_with() + + def test_qsize(self): + if self.abstract: + return + q = self.Queue('test_clear', no_ack=True) + + for i in range(10): + q.put({'hello': 'SimplePurge%d' % (i, )}) + + self.assertEqual(q.qsize(), 10) + self.assertEqual(len(q), 10) + + def test_autoclose(self): + if self.abstract: + return + channel = self.connection.channel() + q = self.Queue('test_autoclose', no_ack=True, channel=channel) + q.close() + + def test_custom_Queue(self): + if self.abstract: + return + n = self.__class__.__name__ + exchange = Exchange('%s-test.custom.Queue' % (n, )) + queue = Queue('%s-test.custom.Queue' % (n, ), + exchange, + 'my.routing.key') + + q = self.Queue(queue) + self.assertEqual(q.consumer.queues[0], queue) + q.close() + + def test_bool(self): + if self.abstract: + return + q = self.Queue('test_nonzero') + self.assertTrue(q) + + +class test_SimpleQueue(SimpleBase): + abstract = False + + def _Queue(self, *args, **kwargs): + return self.connection.SimpleQueue(*args, **kwargs) + + def test_is_ack(self): + q = self.Queue('test_is_no_ack') + self.assertFalse(q.no_ack) + + +class test_SimpleBuffer(SimpleBase): + abstract = False + + def Queue(self, *args, **kwargs): + return self.connection.SimpleBuffer(*args, **kwargs) + + def test_is_no_ack(self): + q = self.Queue('test_is_no_ack') + self.assertTrue(q.no_ack) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/test_syn.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_syn.py new file mode 100644 index 0000000..34e5803 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/test_syn.py @@ -0,0 +1,61 @@ +from __future__ import absolute_import + +import socket +import sys +import types + +from kombu import syn + +from kombu.tests.case import Case, patch, module_exists + + +class test_syn(Case): + + def test_compat(self): + self.assertEqual(syn.blocking(lambda: 10), 10) + syn.select_blocking_method('foo') + + def test_detect_environment(self): + try: + syn._environment = None + X = syn.detect_environment() + self.assertEqual(syn._environment, X) + Y = syn.detect_environment() + self.assertEqual(Y, X) + finally: + syn._environment = None + + @module_exists('eventlet', 'eventlet.patcher') + def test_detect_environment_eventlet(self): + with patch('eventlet.patcher.is_monkey_patched', create=True) as m: + self.assertTrue(sys.modules['eventlet']) + m.return_value = True + env = syn._detect_environment() + m.assert_called_with(socket) + self.assertEqual(env, 'eventlet') + + @module_exists('gevent') + def test_detect_environment_gevent(self): + with patch('gevent.socket', create=True) as m: + prev, socket.socket = socket.socket, m.socket + try: + self.assertTrue(sys.modules['gevent']) + env = syn._detect_environment() + self.assertEqual(env, 'gevent') + finally: + socket.socket = prev + + def test_detect_environment_no_eventlet_or_gevent(self): + try: + sys.modules['eventlet'] = types.ModuleType('eventlet') + sys.modules['eventlet.patcher'] = types.ModuleType('eventlet') + self.assertEqual(syn._detect_environment(), 'default') + finally: + sys.modules.pop('eventlet', None) + syn._detect_environment() + try: + sys.modules['gevent'] = types.ModuleType('gevent') + 
self.assertEqual(syn._detect_environment(), 'default') + finally: + sys.modules.pop('gevent', None) + syn._detect_environment() diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/__init__.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_SQS.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_SQS.py new file mode 100644 index 0000000..117135c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_SQS.py @@ -0,0 +1,302 @@ +"""Testing module for the kombu.transport.SQS package. + +NOTE: The SQSQueueMock and SQSConnectionMock classes originally come from +http://github.com/pcsforeducation/sqs-mock-python. They have been patched +slightly. +""" + +from __future__ import absolute_import + +import sys + +from kombu import Connection +from kombu import messaging +from kombu import five +from kombu.tests.case import Case, SkipTest +import kombu + + +if sys.version_info[0] >= 3: + SQS, skip_reason = None, 'boto does not support Python 3' # noqa +else: + try: + from kombu.transport import SQS + except ImportError: + # Boto must not be installed if the SQS transport fails to import, + # so we skip all unit tests. Set SQS to None here, and it will be + # checked during the setUp() phase later. + SQS, skip_reason = None, 'boto not installed' # noqa + + +class SQSQueueMock(object): + + def __init__(self, name): + self.name = name + self.messages = [] + self._get_message_calls = 0 + + def clear(self, page_size=10, vtimeout=10): + empty, self.messages[:] = not self.messages, [] + return not empty + + def count(self, page_size=10, vtimeout=10): + return len(self.messages) + count_slow = count + + def delete(self): + self.messages[:] = [] + return True + + def delete_message(self, message): + try: + self.messages.remove(message) + except ValueError: + return False + return True + + def get_messages(self, num_messages=1, visibility_timeout=None, + attributes=None, *args, **kwargs): + self._get_message_calls += 1 + return self.messages[:num_messages] + + def read(self, visibility_timeout=None): + return self.messages.pop(0) + + def write(self, message): + self.messages.append(message) + return True + + +class SQSConnectionMock(object): + + def __init__(self): + self.queues = {} + + def get_queue(self, queue): + return self.queues.get(queue) + + def get_all_queues(self, prefix=""): + return self.queues.values() + + def delete_queue(self, queue, force_deletion=False): + q = self.get_queue(queue) + if q: + if q.count(): + return False + q.clear() + self.queues.pop(queue, None) + + def delete_message(self, queue, message): + return queue.delete_message(message) + + def create_queue(self, name, *args, **kwargs): + q = self.queues[name] = SQSQueueMock(name) + return q + + +class test_Channel(Case): + + def handleMessageCallback(self, message): + self.callback_message = message + + def setUp(self): + """Mock the back-end SQS classes""" + # Sanity check... if SQS is None, then it did not import and we + # cannot execute our tests. + if SQS is None: + raise SkipTest(skip_reason) + + SQS.Channel._queue_cache.clear() + + # Common variables used in the unit tests + self.queue_name = 'unittest' + + # Mock the sqs() method that returns an SQSConnection object and + # instead return an SQSConnectionMock() object. 
+ self.sqs_conn_mock = SQSConnectionMock() + + def mock_sqs(): + return self.sqs_conn_mock + SQS.Channel.sqs = mock_sqs() + + # Set up a task exchange for passing tasks through the queue + self.exchange = kombu.Exchange('test_SQS', type='direct') + self.queue = kombu.Queue(self.queue_name, + self.exchange, + self.queue_name) + + # Mock up a test SQS Queue with the SQSQueueMock class (and always + # make sure its a clean empty queue) + self.sqs_queue_mock = SQSQueueMock(self.queue_name) + + # Now, create our Connection object with the SQS Transport and store + # the connection/channel objects as references for use in these tests. + self.connection = Connection(transport=SQS.Transport) + self.channel = self.connection.channel() + + self.queue(self.channel).declare() + self.producer = messaging.Producer(self.channel, + self.exchange, + routing_key=self.queue_name) + + # Lastly, make sure that we're set up to 'consume' this queue. + self.channel.basic_consume(self.queue_name, + no_ack=True, + callback=self.handleMessageCallback, + consumer_tag='unittest') + + def test_init(self): + """kombu.SQS.Channel instantiates correctly with mocked queues""" + self.assertIn(self.queue_name, self.channel._queue_cache) + + def test_new_queue(self): + queue_name = 'new_unittest_queue' + self.channel._new_queue(queue_name) + self.assertIn(queue_name, self.sqs_conn_mock.queues) + # For cleanup purposes, delete the queue and the queue file + self.channel._delete(queue_name) + + def test_delete(self): + queue_name = 'new_unittest_queue' + self.channel._new_queue(queue_name) + self.channel._delete(queue_name) + self.assertNotIn(queue_name, self.channel._queue_cache) + + def test_get_from_sqs(self): + # Test getting a single message + message = 'my test message' + self.producer.publish(message) + results = self.channel._get_from_sqs(self.queue_name) + self.assertEqual(len(results), 1) + + # Now test getting many messages + for i in range(3): + message = 'message: {0}'.format(i) + self.producer.publish(message) + + results = self.channel._get_from_sqs(self.queue_name, count=3) + self.assertEqual(len(results), 3) + + def test_get_with_empty_list(self): + with self.assertRaises(five.Empty): + self.channel._get(self.queue_name) + + def test_get_bulk_raises_empty(self): + with self.assertRaises(five.Empty): + self.channel._get_bulk(self.queue_name) + + def test_messages_to_python(self): + message_count = 3 + # Create several test messages and publish them + for i in range(message_count): + message = 'message: %s' % i + self.producer.publish(message) + + # Get the messages now + messages = self.channel._get_from_sqs( + self.queue_name, count=message_count, + ) + + # Now convert them to payloads + payloads = self.channel._messages_to_python( + messages, self.queue_name, + ) + + # We got the same number of payloads back, right? 
+ self.assertEqual(len(payloads), message_count) + + # Make sure they're payload-style objects + for p in payloads: + self.assertTrue('properties' in p) + + def test_put_and_get(self): + message = 'my test message' + self.producer.publish(message) + results = self.queue(self.channel).get().payload + self.assertEqual(message, results) + + def test_puts_and_gets(self): + for i in range(3): + message = 'message: %s' % i + self.producer.publish(message) + + for i in range(3): + self.assertEqual('message: %s' % i, + self.queue(self.channel).get().payload) + + def test_put_and_get_bulk(self): + # With QoS.prefetch_count = 0 + message = 'my test message' + self.producer.publish(message) + results = self.channel._get_bulk(self.queue_name) + self.assertEqual(1, len(results)) + + def test_puts_and_get_bulk(self): + # Generate 8 messages + message_count = 8 + + # Set the prefetch_count to 5 + self.channel.qos.prefetch_count = 5 + + # Now, generate all the messages + for i in range(message_count): + message = 'message: %s' % i + self.producer.publish(message) + + # Count how many messages are retrieved the first time. Should + # be 5 (message_count). + results = self.channel._get_bulk(self.queue_name) + self.assertEqual(5, len(results)) + + # Now, do the get again, the number of messages returned should be 3. + results = self.channel._get_bulk(self.queue_name) + self.assertEqual(3, len(results)) + + def test_drain_events_with_empty_list(self): + def mock_can_consume(): + return False + self.channel.qos.can_consume = mock_can_consume + with self.assertRaises(five.Empty): + self.channel.drain_events() + + def test_drain_events_with_prefetch_5(self): + # Generate 20 messages + message_count = 20 + expected_get_message_count = 4 + + # Set the prefetch_count to 5 + self.channel.qos.prefetch_count = 5 + + # Now, generate all the messages + for i in range(message_count): + self.producer.publish('message: %s' % i) + + # Now drain all the events + for i in range(message_count): + self.channel.drain_events() + + # How many times was the SQSConnectionMock get_message method called? + self.assertEqual( + expected_get_message_count, + self.channel._queue_cache[self.queue_name]._get_message_calls) + + def test_drain_events_with_prefetch_none(self): + # Generate 20 messages + message_count = 20 + expected_get_message_count = 2 + + # Set the prefetch_count to None + self.channel.qos.prefetch_count = None + + # Now, generate all the messages + for i in range(message_count): + self.producer.publish('message: %s' % i) + + # Now drain all the events + for i in range(message_count): + self.channel.drain_events() + + # How many times was the SQSConnectionMock get_message method called? 
+ self.assertEqual( + expected_get_message_count, + self.channel._queue_cache[self.queue_name]._get_message_calls) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_amqplib.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_amqplib.py new file mode 100644 index 0000000..cf7d615 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_amqplib.py @@ -0,0 +1,162 @@ +from __future__ import absolute_import + +import sys + +from kombu import Connection + +from kombu.tests.case import Case, SkipTest, Mock, mask_modules + + +class MockConnection(dict): + + def __setattr__(self, key, value): + self[key] = value + +try: + __import__('amqplib') +except ImportError: + amqplib = Channel = None +else: + from kombu.transport import amqplib + + class Channel(amqplib.Channel): + wait_returns = [] + + def _x_open(self, *args, **kwargs): + pass + + def wait(self, *args, **kwargs): + return self.wait_returns + + def _send_method(self, *args, **kwargs): + pass + + +class amqplibCase(Case): + + def setUp(self): + if amqplib is None: + raise SkipTest('amqplib not installed') + self.setup() + + def setup(self): + pass + + +class test_Channel(amqplibCase): + + def setup(self): + self.conn = Mock() + self.conn.channels = {} + self.channel = Channel(self.conn, 0) + + def test_init(self): + self.assertFalse(self.channel.no_ack_consumers) + + def test_prepare_message(self): + self.assertTrue(self.channel.prepare_message( + 'foobar', 10, 'application/data', 'utf-8', + properties={}, + )) + + def test_message_to_python(self): + message = Mock() + message.headers = {} + message.properties = {} + self.assertTrue(self.channel.message_to_python(message)) + + def test_close_resolves_connection_cycle(self): + self.assertIsNotNone(self.channel.connection) + self.channel.close() + self.assertIsNone(self.channel.connection) + + def test_basic_consume_registers_ack_status(self): + self.channel.wait_returns = 'my-consumer-tag' + self.channel.basic_consume('foo', no_ack=True) + self.assertIn('my-consumer-tag', self.channel.no_ack_consumers) + + self.channel.wait_returns = 'other-consumer-tag' + self.channel.basic_consume('bar', no_ack=False) + self.assertNotIn('other-consumer-tag', self.channel.no_ack_consumers) + + self.channel.basic_cancel('my-consumer-tag') + self.assertNotIn('my-consumer-tag', self.channel.no_ack_consumers) + + +class test_Transport(amqplibCase): + + def setup(self): + self.connection = Connection('amqplib://') + self.transport = self.connection.transport + + def test_create_channel(self): + connection = Mock() + self.transport.create_channel(connection) + connection.channel.assert_called_with() + + def test_drain_events(self): + connection = Mock() + self.transport.drain_events(connection, timeout=10.0) + connection.drain_events.assert_called_with(timeout=10.0) + + def test_dnspython_localhost_resolve_bug(self): + + class Conn(object): + + def __init__(self, **kwargs): + vars(self).update(kwargs) + + self.transport.Connection = Conn + self.transport.client.hostname = 'localhost' + conn1 = self.transport.establish_connection() + self.assertEqual(conn1.host, '127.0.0.1:5672') + + self.transport.client.hostname = 'example.com' + conn2 = self.transport.establish_connection() + self.assertEqual(conn2.host, 'example.com:5672') + + def test_close_connection(self): + connection = Mock() + connection.client = Mock() + self.transport.close_connection(connection) + + self.assertIsNone(connection.client) + connection.close.assert_called_with() + + 
def test_verify_connection(self): + connection = Mock() + connection.channels = None + self.assertFalse(self.transport.verify_connection(connection)) + + connection.channels = {1: 1, 2: 2} + self.assertTrue(self.transport.verify_connection(connection)) + + @mask_modules('ssl') + def test_import_no_ssl(self): + pm = sys.modules.pop('kombu.transport.amqplib') + try: + from kombu.transport.amqplib import SSLError + self.assertEqual(SSLError.__module__, 'kombu.transport.amqplib') + finally: + if pm is not None: + sys.modules['kombu.transport.amqplib'] = pm + + +class test_amqplib(amqplibCase): + + def test_default_port(self): + + class Transport(amqplib.Transport): + Connection = MockConnection + + c = Connection(port=None, transport=Transport).connect() + self.assertEqual(c['host'], + '127.0.0.1:%s' % (Transport.default_port, )) + + def test_custom_port(self): + + class Transport(amqplib.Transport): + Connection = MockConnection + + c = Connection(port=1337, transport=Transport).connect() + self.assertEqual(c['host'], '127.0.0.1:1337') diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_base.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_base.py new file mode 100644 index 0000000..5c4a50d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_base.py @@ -0,0 +1,148 @@ +from __future__ import absolute_import + +from kombu import Connection, Consumer, Exchange, Producer, Queue +from kombu.five import text_t +from kombu.message import Message +from kombu.transport.base import StdChannel, Transport, Management + +from kombu.tests.case import Case, Mock + + +class test_StdChannel(Case): + + def setUp(self): + self.conn = Connection('memory://') + self.channel = self.conn.channel() + self.channel.queues.clear() + self.conn.connection.state.clear() + + def test_Consumer(self): + q = Queue('foo', Exchange('foo')) + print(self.channel.queues) + cons = self.channel.Consumer(q) + self.assertIsInstance(cons, Consumer) + self.assertIs(cons.channel, self.channel) + + def test_Producer(self): + prod = self.channel.Producer() + self.assertIsInstance(prod, Producer) + self.assertIs(prod.channel, self.channel) + + def test_interface_get_bindings(self): + with self.assertRaises(NotImplementedError): + StdChannel().get_bindings() + + def test_interface_after_reply_message_received(self): + self.assertIsNone( + StdChannel().after_reply_message_received(Queue('foo')), + ) + + +class test_Message(Case): + + def setUp(self): + self.conn = Connection('memory://') + self.channel = self.conn.channel() + self.message = Message(self.channel, delivery_tag=313) + + def test_postencode(self): + m = Message(self.channel, text_t('FOO'), postencode='ccyzz') + with self.assertRaises(LookupError): + m._reraise_error() + m.ack() + + def test_ack_respects_no_ack_consumers(self): + self.channel.no_ack_consumers = set(['abc']) + self.message.delivery_info['consumer_tag'] = 'abc' + ack = self.channel.basic_ack = Mock() + + self.message.ack() + self.assertNotEqual(self.message._state, 'ACK') + self.assertFalse(ack.called) + + def test_ack_missing_consumer_tag(self): + self.channel.no_ack_consumers = set(['abc']) + self.message.delivery_info = {} + ack = self.channel.basic_ack = Mock() + + self.message.ack() + ack.assert_called_with(self.message.delivery_tag) + + def test_ack_not_no_ack(self): + self.channel.no_ack_consumers = set() + self.message.delivery_info['consumer_tag'] = 'abc' + ack = self.channel.basic_ack = Mock() + + self.message.ack() + 
ack.assert_called_with(self.message.delivery_tag) + + def test_ack_log_error_when_no_error(self): + ack = self.message.ack = Mock() + self.message.ack_log_error(Mock(), KeyError) + ack.assert_called_with() + + def test_ack_log_error_when_error(self): + ack = self.message.ack = Mock() + ack.side_effect = KeyError('foo') + logger = Mock() + self.message.ack_log_error(logger, KeyError) + ack.assert_called_with() + self.assertTrue(logger.critical.called) + self.assertIn("Couldn't ack", logger.critical.call_args[0][0]) + + def test_reject_log_error_when_no_error(self): + reject = self.message.reject = Mock() + self.message.reject_log_error(Mock(), KeyError, requeue=True) + reject.assert_called_with(requeue=True) + + def test_reject_log_error_when_error(self): + reject = self.message.reject = Mock() + reject.side_effect = KeyError('foo') + logger = Mock() + self.message.reject_log_error(logger, KeyError) + reject.assert_called_with(requeue=False) + self.assertTrue(logger.critical.called) + self.assertIn("Couldn't reject", logger.critical.call_args[0][0]) + + +class test_interface(Case): + + def test_establish_connection(self): + with self.assertRaises(NotImplementedError): + Transport(None).establish_connection() + + def test_close_connection(self): + with self.assertRaises(NotImplementedError): + Transport(None).close_connection(None) + + def test_create_channel(self): + with self.assertRaises(NotImplementedError): + Transport(None).create_channel(None) + + def test_close_channel(self): + with self.assertRaises(NotImplementedError): + Transport(None).close_channel(None) + + def test_drain_events(self): + with self.assertRaises(NotImplementedError): + Transport(None).drain_events(None) + + def test_heartbeat_check(self): + Transport(None).heartbeat_check(Mock(name='connection')) + + def test_driver_version(self): + self.assertTrue(Transport(None).driver_version()) + + def test_register_with_event_loop(self): + Transport(None).register_with_event_loop(Mock(name='loop')) + + def test_manager(self): + self.assertTrue(Transport(None).manager) + + +class test_Management(Case): + + def test_get_bindings(self): + m = Management(Mock(name='transport')) + with self.assertRaises(NotImplementedError): + m.get_bindings() diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_filesystem.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_filesystem.py new file mode 100644 index 0000000..0649a8d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_filesystem.py @@ -0,0 +1,123 @@ +from __future__ import absolute_import + +import sys +import tempfile + +from kombu import Connection, Exchange, Queue, Consumer, Producer + +from kombu.tests.case import Case, SkipTest + + +class test_FilesystemTransport(Case): + + def setUp(self): + if sys.platform == 'win32': + raise SkipTest('Needs win32con module') + try: + data_folder_in = tempfile.mkdtemp() + data_folder_out = tempfile.mkdtemp() + except Exception: + raise SkipTest('filesystem transport: cannot create tempfiles') + self.c = Connection(transport='filesystem', + transport_options={ + 'data_folder_in': data_folder_in, + 'data_folder_out': data_folder_out, + }) + self.p = Connection(transport='filesystem', + transport_options={ + 'data_folder_in': data_folder_out, + 'data_folder_out': data_folder_in, + }) + self.e = Exchange('test_transport_filesystem') + self.q = Queue('test_transport_filesystem', + exchange=self.e, + routing_key='test_transport_filesystem') + self.q2 = 
Queue('test_transport_filesystem2', + exchange=self.e, + routing_key='test_transport_filesystem2') + + def test_produce_consume_noack(self): + producer = Producer(self.p.channel(), self.e) + consumer = Consumer(self.c.channel(), self.q, no_ack=True) + + for i in range(10): + producer.publish({'foo': i}, + routing_key='test_transport_filesystem') + + _received = [] + + def callback(message_data, message): + _received.append(message) + + consumer.register_callback(callback) + consumer.consume() + + while 1: + if len(_received) == 10: + break + self.c.drain_events() + + self.assertEqual(len(_received), 10) + + def test_produce_consume(self): + producer_channel = self.p.channel() + consumer_channel = self.c.channel() + producer = Producer(producer_channel, self.e) + consumer1 = Consumer(consumer_channel, self.q) + consumer2 = Consumer(consumer_channel, self.q2) + self.q2(consumer_channel).declare() + + for i in range(10): + producer.publish({'foo': i}, + routing_key='test_transport_filesystem') + for i in range(10): + producer.publish({'foo': i}, + routing_key='test_transport_filesystem2') + + _received1 = [] + _received2 = [] + + def callback1(message_data, message): + _received1.append(message) + message.ack() + + def callback2(message_data, message): + _received2.append(message) + message.ack() + + consumer1.register_callback(callback1) + consumer2.register_callback(callback2) + + consumer1.consume() + consumer2.consume() + + while 1: + if len(_received1) + len(_received2) == 20: + break + self.c.drain_events() + + self.assertEqual(len(_received1) + len(_received2), 20) + + # compression + producer.publish({'compressed': True}, + routing_key='test_transport_filesystem', + compression='zlib') + m = self.q(consumer_channel).get() + self.assertDictEqual(m.payload, {'compressed': True}) + + # queue.delete + for i in range(10): + producer.publish({'foo': i}, + routing_key='test_transport_filesystem') + self.assertTrue(self.q(consumer_channel).get()) + self.q(consumer_channel).delete() + self.q(consumer_channel).declare() + self.assertIsNone(self.q(consumer_channel).get()) + + # queue.purge + for i in range(10): + producer.publish({'foo': i}, + routing_key='test_transport_filesystem2') + self.assertTrue(self.q2(consumer_channel).get()) + self.q2(consumer_channel).purge() + self.assertIsNone(self.q2(consumer_channel).get()) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_librabbitmq.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_librabbitmq.py new file mode 100644 index 0000000..a50b262 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_librabbitmq.py @@ -0,0 +1,150 @@ +from __future__ import absolute_import + +try: + import librabbitmq +except ImportError: + librabbitmq = None # noqa +else: + from kombu.transport import librabbitmq # noqa + +from kombu.tests.case import Case, Mock, SkipTest, patch + + +class lrmqCase(Case): + + def setUp(self): + if librabbitmq is None: + raise SkipTest('librabbitmq is not installed') + + +class test_Message(lrmqCase): + + def test_init(self): + chan = Mock(name='channel') + message = librabbitmq.Message( + chan, {'prop': 42}, {'delivery_tag': 337}, 'body', + ) + self.assertEqual(message.body, 'body') + self.assertEqual(message.delivery_tag, 337) + self.assertEqual(message.properties['prop'], 42) + + +class test_Channel(lrmqCase): + + def test_prepare_message(self): + conn = Mock(name='connection') + chan = librabbitmq.Channel(conn, 1) + self.assertTrue(chan) + + body = 'the 
quick brown fox...' + properties = {'name': 'Elaine M.'} + + body2, props2 = chan.prepare_message( + body, properties=properties, + priority=999, + content_type='ctype', + content_encoding='cenc', + headers={'H': 2}, + ) + + self.assertEqual(props2['name'], 'Elaine M.') + self.assertEqual(props2['priority'], 999) + self.assertEqual(props2['content_type'], 'ctype') + self.assertEqual(props2['content_encoding'], 'cenc') + self.assertEqual(props2['headers'], {'H': 2}) + self.assertEqual(body2, body) + + body3, props3 = chan.prepare_message(body, priority=777) + self.assertEqual(props3['priority'], 777) + self.assertEqual(body3, body) + + +class test_Transport(lrmqCase): + + def setUp(self): + super(test_Transport, self).setUp() + self.client = Mock(name='client') + self.T = librabbitmq.Transport(self.client) + + def test_driver_version(self): + self.assertTrue(self.T.driver_version()) + + def test_create_channel(self): + conn = Mock(name='connection') + chan = self.T.create_channel(conn) + self.assertTrue(chan) + conn.channel.assert_called_with() + + def test_drain_events(self): + conn = Mock(name='connection') + self.T.drain_events(conn, timeout=1.33) + conn.drain_events.assert_called_with(timeout=1.33) + + def test_establish_connection_SSL_not_supported(self): + self.client.ssl = True + with self.assertRaises(NotImplementedError): + self.T.establish_connection() + + def test_establish_connection(self): + self.T.Connection = Mock(name='Connection') + self.T.client.ssl = False + self.T.client.port = None + self.T.client.transport_options = {} + + conn = self.T.establish_connection() + self.assertEqual( + self.T.client.port, + self.T.default_connection_params['port'], + ) + self.assertEqual(conn.client, self.T.client) + self.assertEqual(self.T.client.drain_events, conn.drain_events) + + def test_collect__no_conn(self): + self.T.client.drain_events = 1234 + self.T._collect(None) + self.assertIsNone(self.client.drain_events) + self.assertIsNone(self.T.client) + + def test_collect__with_conn(self): + self.T.client.drain_events = 1234 + conn = Mock(name='connection') + chans = conn.channels = {1: Mock(name='chan1'), 2: Mock(name='chan2')} + conn.callbacks = {'foo': Mock(name='cb1'), 'bar': Mock(name='cb2')} + for i, chan in enumerate(conn.channels.values()): + chan.connection = i + + with patch('os.close') as close: + self.T._collect(conn) + close.assert_called_with(conn.fileno()) + self.assertFalse(conn.channels) + self.assertFalse(conn.callbacks) + for chan in chans.values(): + self.assertIsNone(chan.connection) + self.assertIsNone(self.client.drain_events) + self.assertIsNone(self.T.client) + + with patch('os.close') as close: + self.T.client = self.client + close.side_effect = OSError() + self.T._collect(conn) + close.assert_called_with(conn.fileno()) + + def test_register_with_event_loop(self): + conn = Mock(name='conn') + loop = Mock(name='loop') + self.T.register_with_event_loop(conn, loop) + loop.add_reader.assert_called_with( + conn.fileno(), self.T.on_readable, conn, loop, + ) + + def test_verify_connection(self): + conn = Mock(name='connection') + conn.connected = True + self.assertTrue(self.T.verify_connection(conn)) + + def test_close_connection(self): + conn = Mock(name='connection') + self.client.drain_events = 1234 + self.T.close_connection(conn) + self.assertIsNone(self.client.drain_events) + conn.close.assert_called_with() diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_memory.py 
b/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_memory.py new file mode 100644 index 0000000..605527f --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_memory.py @@ -0,0 +1,157 @@ +from __future__ import absolute_import + +import socket + +from kombu import Connection, Exchange, Queue, Consumer, Producer + +from kombu.tests.case import Case + + +class test_MemoryTransport(Case): + + def setUp(self): + self.c = Connection(transport='memory') + self.e = Exchange('test_transport_memory') + self.q = Queue('test_transport_memory', + exchange=self.e, + routing_key='test_transport_memory') + self.q2 = Queue('test_transport_memory2', + exchange=self.e, + routing_key='test_transport_memory2') + self.fanout = Exchange('test_transport_memory_fanout', type='fanout') + self.q3 = Queue('test_transport_memory_fanout1', + exchange=self.fanout) + self.q4 = Queue('test_transport_memory_fanout2', + exchange=self.fanout) + + def test_driver_version(self): + self.assertTrue(self.c.transport.driver_version()) + + def test_produce_consume_noack(self): + channel = self.c.channel() + producer = Producer(channel, self.e) + consumer = Consumer(channel, self.q, no_ack=True) + + for i in range(10): + producer.publish({'foo': i}, routing_key='test_transport_memory') + + _received = [] + + def callback(message_data, message): + _received.append(message) + + consumer.register_callback(callback) + consumer.consume() + + while 1: + if len(_received) == 10: + break + self.c.drain_events() + + self.assertEqual(len(_received), 10) + + def test_produce_consume_fanout(self): + producer = self.c.Producer() + consumer = self.c.Consumer([self.q3, self.q4]) + + producer.publish( + {'hello': 'world'}, + declare=consumer.queues, + exchange=self.fanout, + ) + + self.assertEqual(self.q3(self.c).get().payload, {'hello': 'world'}) + self.assertEqual(self.q4(self.c).get().payload, {'hello': 'world'}) + self.assertIsNone(self.q3(self.c).get()) + self.assertIsNone(self.q4(self.c).get()) + + def test_produce_consume(self): + channel = self.c.channel() + producer = Producer(channel, self.e) + consumer1 = Consumer(channel, self.q) + consumer2 = Consumer(channel, self.q2) + self.q2(channel).declare() + + for i in range(10): + producer.publish({'foo': i}, routing_key='test_transport_memory') + for i in range(10): + producer.publish({'foo': i}, routing_key='test_transport_memory2') + + _received1 = [] + _received2 = [] + + def callback1(message_data, message): + _received1.append(message) + message.ack() + + def callback2(message_data, message): + _received2.append(message) + message.ack() + + consumer1.register_callback(callback1) + consumer2.register_callback(callback2) + + consumer1.consume() + consumer2.consume() + + while 1: + if len(_received1) + len(_received2) == 20: + break + self.c.drain_events() + + self.assertEqual(len(_received1) + len(_received2), 20) + + # compression + producer.publish({'compressed': True}, + routing_key='test_transport_memory', + compression='zlib') + m = self.q(channel).get() + self.assertDictEqual(m.payload, {'compressed': True}) + + # queue.delete + for i in range(10): + producer.publish({'foo': i}, routing_key='test_transport_memory') + self.assertTrue(self.q(channel).get()) + self.q(channel).delete() + self.q(channel).declare() + self.assertIsNone(self.q(channel).get()) + + # queue.purge + for i in range(10): + producer.publish({'foo': i}, routing_key='test_transport_memory2') + self.assertTrue(self.q2(channel).get()) + self.q2(channel).purge() + 
self.assertIsNone(self.q2(channel).get()) + + def test_drain_events(self): + with self.assertRaises(socket.timeout): + self.c.drain_events(timeout=0.1) + + c1 = self.c.channel() + c2 = self.c.channel() + + with self.assertRaises(socket.timeout): + self.c.drain_events(timeout=0.1) + + del(c1) # so pyflakes doesn't complain. + del(c2) + + def test_drain_events_unregistered_queue(self): + c1 = self.c.channel() + + class Cycle(object): + + def get(self, timeout=None): + return ('foo', 'foo'), c1 + + self.c.transport.cycle = Cycle() + with self.assertRaises(KeyError): + self.c.drain_events() + + def test_queue_for(self): + chan = self.c.channel() + chan.queues.clear() + + x = chan._queue_for('foo') + self.assertTrue(x) + self.assertIs(chan._queue_for('foo'), x) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_mongodb.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_mongodb.py new file mode 100644 index 0000000..c8d71de --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_mongodb.py @@ -0,0 +1,120 @@ +from __future__ import absolute_import + +from kombu import Connection + +from kombu.tests.case import Case, SkipTest, Mock, skip_if_not_module + + +class MockConnection(dict): + + def __setattr__(self, key, value): + self[key] = value + + +class test_mongodb(Case): + + def _get_connection(self, url, **kwargs): + from kombu.transport import mongodb + + class _Channel(mongodb.Channel): + + def _create_client(self): + self._client = Mock(name='client') + + class Transport(mongodb.Transport): + Connection = MockConnection + Channel = _Channel + + return Connection(url, transport=Transport, **kwargs).connect() + + @skip_if_not_module('pymongo') + def test_defaults(self): + url = 'mongodb://' + + c = self._get_connection(url) + hostname, dbname, options = c.channels[0]._parse_uri() + + self.assertEqual(dbname, 'kombu_default') + self.assertEqual(hostname, 'mongodb://127.0.0.1') + + @skip_if_not_module('pymongo') + def test_custom_host(self): + url = 'mongodb://localhost' + c = self._get_connection(url) + hostname, dbname, options = c.channels[0]._parse_uri() + + self.assertEqual(dbname, 'kombu_default') + + @skip_if_not_module('pymongo') + def test_custom_database(self): + url = 'mongodb://localhost/dbname' + c = self._get_connection(url) + hostname, dbname, options = c.channels[0]._parse_uri() + + self.assertEqual(dbname, 'dbname') + + @skip_if_not_module('pymongo') + def test_custom_credentials(self): + url = 'mongodb://localhost/dbname' + c = self._get_connection(url, userid='foo', password='bar') + hostname, dbname, options = c.channels[0]._parse_uri() + + self.assertEqual(hostname, 'mongodb://foo:bar@localhost/dbname') + self.assertEqual(dbname, 'dbname') + + @skip_if_not_module('pymongo') + def test_options(self): + url = 'mongodb://localhost,localhost2:29017/dbname?fsync=true' + c = self._get_connection(url) + + hostname, dbname, options = c.channels[0]._parse_uri() + + self.assertTrue(options['fsync']) + + @skip_if_not_module('pymongo') + def test_real_connections(self): + from pymongo.errors import ConfigurationError + + raise SkipTest( + 'Test is functional: it actually connects to mongod') + + url = 'mongodb://localhost,localhost:29017/dbname' + c = self._get_connection(url) + client = c.channels[0].client + + nodes = client.connection.nodes + # If there's just 1 node it is because we're connecting to a single + # server instead of a repl / mongoss. 
+ if len(nodes) == 2: + self.assertTrue(('localhost', 29017) in nodes) + self.assertEqual(client.name, 'dbname') + + url = 'mongodb://localhost:27017,localhost2:29017/dbname' + c = self._get_connection(url) + client = c.channels[0].client + + # Login to admin db since there's no db specified + url = 'mongodb://adminusername:adminpassword@localhost' + c = self._get_connection(url) + client = c.channels[0].client + self.assertEqual(client.name, 'kombu_default') + + # Let's make sure that using admin db doesn't break anything + # when no user is specified + url = 'mongodb://localhost' + c = self._get_connection(url) + client = c.channels[0].client + + # Assuming there's user 'username' with password 'password' + # configured in mongodb + url = 'mongodb://username:password@localhost/dbname' + c = self._get_connection(url) + client = c.channels[0].client + + # Assuming there's no user 'nousername' with password 'nopassword' + # configured in mongodb + url = 'mongodb://nousername:nopassword@localhost/dbname' + c = self._get_connection(url) + + with self.assertRaises(ConfigurationError): + c.channels[0].client diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_pyamqp.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_pyamqp.py new file mode 100644 index 0000000..d6a910b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_pyamqp.py @@ -0,0 +1,179 @@ +from __future__ import absolute_import + +import sys + +from itertools import count + +try: + import amqp # noqa +except ImportError: + pyamqp = None # noqa +else: + from kombu.transport import pyamqp +from kombu import Connection +from kombu.five import nextfun + +from kombu.tests.case import Case, Mock, SkipTest, mask_modules, patch + + +class MockConnection(dict): + + def __setattr__(self, key, value): + self[key] = value + + +class test_Channel(Case): + + def setUp(self): + if pyamqp is None: + raise SkipTest('py-amqp not installed') + + class Channel(pyamqp.Channel): + wait_returns = [] + + def _x_open(self, *args, **kwargs): + pass + + def wait(self, *args, **kwargs): + return self.wait_returns + + def _send_method(self, *args, **kwargs): + pass + + self.conn = Mock() + self.conn._get_free_channel_id.side_effect = nextfun(count(0)) + self.conn.channels = {} + self.channel = Channel(self.conn, 0) + + def test_init(self): + self.assertFalse(self.channel.no_ack_consumers) + + def test_prepare_message(self): + self.assertTrue(self.channel.prepare_message( + 'foobar', 10, 'application/data', 'utf-8', + properties={}, + )) + + def test_message_to_python(self): + message = Mock() + message.headers = {} + message.properties = {} + self.assertTrue(self.channel.message_to_python(message)) + + def test_close_resolves_connection_cycle(self): + self.assertIsNotNone(self.channel.connection) + self.channel.close() + self.assertIsNone(self.channel.connection) + + def test_basic_consume_registers_ack_status(self): + self.channel.wait_returns = 'my-consumer-tag' + self.channel.basic_consume('foo', no_ack=True) + self.assertIn('my-consumer-tag', self.channel.no_ack_consumers) + + self.channel.wait_returns = 'other-consumer-tag' + self.channel.basic_consume('bar', no_ack=False) + self.assertNotIn('other-consumer-tag', self.channel.no_ack_consumers) + + self.channel.basic_cancel('my-consumer-tag') + self.assertNotIn('my-consumer-tag', self.channel.no_ack_consumers) + + +class test_Transport(Case): + + def setUp(self): + if pyamqp is None: + raise SkipTest('py-amqp not installed') + 
self.connection = Connection('pyamqp://') + self.transport = self.connection.transport + + def test_create_channel(self): + connection = Mock() + self.transport.create_channel(connection) + connection.channel.assert_called_with() + + def test_driver_version(self): + self.assertTrue(self.transport.driver_version()) + + def test_drain_events(self): + connection = Mock() + self.transport.drain_events(connection, timeout=10.0) + connection.drain_events.assert_called_with(timeout=10.0) + + def test_dnspython_localhost_resolve_bug(self): + + class Conn(object): + + def __init__(self, **kwargs): + vars(self).update(kwargs) + + self.transport.Connection = Conn + self.transport.client.hostname = 'localhost' + conn1 = self.transport.establish_connection() + self.assertEqual(conn1.host, '127.0.0.1:5672') + + self.transport.client.hostname = 'example.com' + conn2 = self.transport.establish_connection() + self.assertEqual(conn2.host, 'example.com:5672') + + def test_close_connection(self): + connection = Mock() + connection.client = Mock() + self.transport.close_connection(connection) + + self.assertIsNone(connection.client) + connection.close.assert_called_with() + + @mask_modules('ssl') + def test_import_no_ssl(self): + pm = sys.modules.pop('amqp.connection') + try: + from amqp.connection import SSLError + self.assertEqual(SSLError.__module__, 'amqp.connection') + finally: + if pm is not None: + sys.modules['amqp.connection'] = pm + + +class test_pyamqp(Case): + + def setUp(self): + if pyamqp is None: + raise SkipTest('py-amqp not installed') + + def test_default_port(self): + + class Transport(pyamqp.Transport): + Connection = MockConnection + + c = Connection(port=None, transport=Transport).connect() + self.assertEqual(c['host'], + '127.0.0.1:%s' % (Transport.default_port, )) + + def test_custom_port(self): + + class Transport(pyamqp.Transport): + Connection = MockConnection + + c = Connection(port=1337, transport=Transport).connect() + self.assertEqual(c['host'], '127.0.0.1:1337') + + def test_register_with_event_loop(self): + t = pyamqp.Transport(Mock()) + conn = Mock(name='conn') + loop = Mock(name='loop') + t.register_with_event_loop(conn, loop) + loop.add_reader.assert_called_with( + conn.sock, t.on_readable, conn, loop, + ) + + def test_heartbeat_check(self): + t = pyamqp.Transport(Mock()) + conn = Mock() + t.heartbeat_check(conn, rate=4.331) + conn.heartbeat_tick.assert_called_with(rate=4.331) + + def test_get_manager(self): + with patch('kombu.transport.pyamqp.get_manager') as get_manager: + t = pyamqp.Transport(Mock()) + t.get_manager(1, kw=2) + get_manager.assert_called_with(t.client, 1, kw=2) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_qpid.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_qpid.py new file mode 100644 index 0000000..b70ce4e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_qpid.py @@ -0,0 +1,1928 @@ +from __future__ import absolute_import + +import select +import ssl +import socket +import sys +import time +import uuid + +from collections import Callable +from itertools import count +from functools import wraps + +from mock import call + +from kombu.five import Empty, keys, range, monotonic +from kombu.transport.qpid import (AuthenticationFailure, Channel, Connection, + ConnectionError, Message, NotFound, QoS, + Transport) +from kombu.transport.virtual import Base64 +from kombu.tests.case import Case, Mock, case_no_pypy, case_no_python3 +from kombu.tests.case import patch +from 
kombu.utils.compat import OrderedDict + + +QPID_MODULE = 'kombu.transport.qpid' + + +def disable_runtime_dependency_check(cls): + """A decorator to disable runtime dependency checking""" + setup = cls.setUp + teardown = cls.tearDown + dependency_is_none_patcher = patch(QPID_MODULE + '.dependency_is_none') + + @wraps(setup) + def around_setup(self): + mock_dependency_is_none = dependency_is_none_patcher.start() + mock_dependency_is_none.return_value = False + setup(self) + + @wraps(teardown) + def around_teardown(self): + dependency_is_none_patcher.stop() + teardown(self) + + cls.setUp = around_setup + cls.tearDown = around_teardown + return cls + + +class ExtraAssertionsMixin(object): + """A mixin class adding assertDictEqual and assertDictContainsSubset""" + + def assertDictEqual(self, a, b, msg=None): + """ + Test that two dictionaries are equal. + + Implemented here because this method was not available until Python + 2.6. This asserts that the unique set of keys are the same in a and b. + Also asserts that the value of each key is the same in a and b using + the == operator. + """ + self.assertEqual(set(keys(a)), set(keys(b))) + for key in keys(a): + self.assertEqual(a[key], b[key]) + + def assertDictContainsSubset(self, a, b, msg=None): + """ + Assert that all the key/value pairs in a exist in b. + """ + for key in keys(a): + self.assertIn(key, b) + self.assertEqual(a[key], b[key]) + + +class QpidException(Exception): + """ + An object used to mock Exceptions provided by qpid.messaging.exceptions + """ + + def __init__(self, code=None, text=None): + super(Exception, self).__init__(self) + self.code = code + self.text = text + + +class BreakOutException(Exception): + pass + + +@case_no_python3 +@case_no_pypy +class TestQoS__init__(Case): + + def setUp(self): + self.mock_session = Mock() + self.qos = QoS(self.mock_session) + + def test__init__prefetch_default_set_correct_without_prefetch_value(self): + self.assertEqual(self.qos.prefetch_count, 1) + + def test__init__prefetch_is_hard_set_to_one(self): + qos_limit_two = QoS(self.mock_session, prefetch_count=2) + self.assertEqual(qos_limit_two.prefetch_count, 1) + + def test__init___not_yet_acked_is_initialized(self): + self.assertIsInstance(self.qos._not_yet_acked, OrderedDict) + + +@case_no_python3 +@case_no_pypy +class TestQoSCanConsume(Case): + + def setUp(self): + session = Mock() + self.qos = QoS(session) + + def test_True_when_prefetch_limit_is_zero(self): + self.qos.prefetch_count = 0 + self.qos._not_yet_acked = [] + self.assertTrue(self.qos.can_consume()) + + def test_True_when_len_of__not_yet_acked_is_lt_prefetch_count(self): + self.qos.prefetch_count = 3 + self.qos._not_yet_acked = ['a', 'b'] + self.assertTrue(self.qos.can_consume()) + + def test_False_when_len_of__not_yet_acked_is_eq_prefetch_count(self): + self.qos.prefetch_count = 3 + self.qos._not_yet_acked = ['a', 'b', 'c'] + self.assertFalse(self.qos.can_consume()) + + +@case_no_python3 +@case_no_pypy +class TestQoSCanConsumeMaxEstimate(Case): + + def setUp(self): + self.mock_session = Mock() + self.qos = QoS(self.mock_session) + + def test_return_one_when_prefetch_count_eq_zero(self): + self.qos.prefetch_count = 0 + self.assertEqual(self.qos.can_consume_max_estimate(), 1) + + def test_return_prefetch_count_sub_len__not_yet_acked(self): + self.qos._not_yet_acked = ['a', 'b'] + self.qos.prefetch_count = 4 + self.assertEqual(self.qos.can_consume_max_estimate(), 2) + + +@case_no_python3 +@case_no_pypy +class TestQoSAck(Case): + + def setUp(self): + self.mock_session = Mock() + self.qos = 
QoS(self.mock_session) + + def test_ack_pops__not_yet_acked(self): + message = Mock() + self.qos.append(message, 1) + self.assertIn(1, self.qos._not_yet_acked) + self.qos.ack(1) + self.assertNotIn(1, self.qos._not_yet_acked) + + def test_ack_calls_session_acknowledge_with_message(self): + message = Mock() + self.qos.append(message, 1) + self.qos.ack(1) + self.qos.session.acknowledge.assert_called_with(message=message) + + +@case_no_python3 +@case_no_pypy +class TestQoSReject(Case): + + def setUp(self): + self.mock_session = Mock() + self.mock_message = Mock() + self.qos = QoS(self.mock_session) + self.patch_qpid = patch(QPID_MODULE + '.qpid') + self.mock_qpid = self.patch_qpid.start() + self.mock_Disposition = self.mock_qpid.messaging.Disposition + self.mock_RELEASED = self.mock_qpid.messaging.RELEASED + self.mock_REJECTED = self.mock_qpid.messaging.REJECTED + + def tearDown(self): + self.patch_qpid.stop() + + def test_reject_pops__not_yet_acked(self): + self.qos.append(self.mock_message, 1) + self.assertIn(1, self.qos._not_yet_acked) + self.qos.reject(1) + self.assertNotIn(1, self.qos._not_yet_acked) + + def test_reject_requeue_true(self): + self.qos.append(self.mock_message, 1) + self.qos.reject(1, requeue=True) + self.mock_Disposition.assert_called_with(self.mock_RELEASED) + self.qos.session.acknowledge.assert_called_with( + message=self.mock_message, + disposition=self.mock_Disposition.return_value, + ) + + def test_reject_requeue_false(self): + message = Mock() + self.qos.append(message, 1) + self.qos.reject(1, requeue=False) + self.mock_Disposition.assert_called_with(self.mock_REJECTED) + self.qos.session.acknowledge.assert_called_with( + message=message, disposition=self.mock_Disposition.return_value, + ) + + +@case_no_python3 +@case_no_pypy +class TestQoS(Case): + + def mock_message_factory(self): + """Create and return a mock message tag and delivery_tag.""" + m_delivery_tag = self.delivery_tag_generator.next() + m = 'message %s' % (m_delivery_tag, ) + return m, m_delivery_tag + + def add_n_messages_to_qos(self, n, qos): + """Add N mock messages into the passed in qos object""" + for i in range(n): + self.add_message_to_qos(qos) + + def add_message_to_qos(self, qos): + """Add a single mock message into the passed in qos object. + + Uses the mock_message_factory() to create the message and + delivery_tag. 
+ """ + m, m_delivery_tag = self.mock_message_factory() + qos.append(m, m_delivery_tag) + + def setUp(self): + self.mock_session = Mock() + self.qos_no_limit = QoS(self.mock_session) + self.qos_limit_2 = QoS(self.mock_session, prefetch_count=2) + self.delivery_tag_generator = count(1) + + def test_append(self): + """Append two messages and check inside the QoS object that they + were put into the internal data structures correctly + """ + qos = self.qos_no_limit + m1, m1_tag = self.mock_message_factory() + m2, m2_tag = self.mock_message_factory() + qos.append(m1, m1_tag) + length_not_yet_acked = len(qos._not_yet_acked) + self.assertEqual(length_not_yet_acked, 1) + checked_message1 = qos._not_yet_acked[m1_tag] + self.assertIs(m1, checked_message1) + qos.append(m2, m2_tag) + length_not_yet_acked = len(qos._not_yet_acked) + self.assertEqual(length_not_yet_acked, 2) + checked_message2 = qos._not_yet_acked[m2_tag] + self.assertIs(m2, checked_message2) + + def test_get(self): + """Append two messages, and use get to receive them""" + qos = self.qos_no_limit + m1, m1_tag = self.mock_message_factory() + m2, m2_tag = self.mock_message_factory() + qos.append(m1, m1_tag) + qos.append(m2, m2_tag) + message1 = qos.get(m1_tag) + message2 = qos.get(m2_tag) + self.assertIs(m1, message1) + self.assertIs(m2, message2) + + +@case_no_python3 +@case_no_pypy +class ConnectionTestBase(Case): + + @patch(QPID_MODULE + '.qpid') + def setUp(self, mock_qpid): + self.connection_options = { + 'host': 'localhost', + 'port': 5672, + 'transport': 'tcp', + 'timeout': 10, + 'sasl_mechanisms': 'ANONYMOUS', + } + self.mock_qpid_connection = mock_qpid.messaging.Connection + self.conn = Connection(**self.connection_options) + + +@case_no_python3 +@case_no_pypy +class TestConnectionInit(ExtraAssertionsMixin, ConnectionTestBase): + + def test_stores_connection_options(self): + # ensure that only one mech was passed into connection. 
The other + # options should all be passed through as-is + modified_conn_opts = self.connection_options + self.assertDictEqual( + modified_conn_opts, self.conn.connection_options, + ) + + def test_class_variables(self): + self.assertIsInstance(self.conn.channels, list) + self.assertIsInstance(self.conn._callbacks, dict) + + def test_establishes_connection(self): + modified_conn_opts = self.connection_options + self.mock_qpid_connection.establish.assert_called_with( + **modified_conn_opts + ) + + def test_saves_established_connection(self): + created_conn = self.mock_qpid_connection.establish.return_value + self.assertIs(self.conn._qpid_conn, created_conn) + + @patch(QPID_MODULE + '.ConnectionError', new=(QpidException, )) + @patch(QPID_MODULE + '.sys.exc_info') + @patch(QPID_MODULE + '.qpid') + def test_mutates_ConnError_by_message(self, mock_qpid, mock_exc_info): + text = 'connection-forced: Authentication failed(320)' + my_conn_error = QpidException(text=text) + mock_qpid.messaging.Connection.establish.side_effect = my_conn_error + mock_exc_info.return_value = 'a', 'b', None + try: + self.conn = Connection(**self.connection_options) + except AuthenticationFailure as error: + exc_info = sys.exc_info() + self.assertNotIsInstance(error, QpidException) + self.assertIs(exc_info[1], 'b') + self.assertIsNone(exc_info[2]) + else: + self.fail('ConnectionError type was not mutated correctly') + + @patch(QPID_MODULE + '.ConnectionError', new=(QpidException, )) + @patch(QPID_MODULE + '.sys.exc_info') + @patch(QPID_MODULE + '.qpid') + def test_mutates_ConnError_by_code(self, mock_qpid, mock_exc_info): + my_conn_error = QpidException(code=320, text='someothertext') + mock_qpid.messaging.Connection.establish.side_effect = my_conn_error + mock_exc_info.return_value = 'a', 'b', None + try: + self.conn = Connection(**self.connection_options) + except AuthenticationFailure as error: + exc_info = sys.exc_info() + self.assertNotIsInstance(error, QpidException) + self.assertIs(exc_info[1], 'b') + self.assertIsNone(exc_info[2]) + else: + self.fail('ConnectionError type was not mutated correctly') + + @patch(QPID_MODULE + '.ConnectionError', new=(QpidException, )) + @patch(QPID_MODULE + '.sys.exc_info') + @patch(QPID_MODULE + '.qpid') + def test_connection__init__mutates_ConnError_by_message2(self, mock_qpid, + mock_exc_info): + """ + Test for PLAIN connection via python-saslwrapper, sans cyrus-sasl-plain + + This test is specific for what is returned when we attempt to connect + with PLAIN mech and python-saslwrapper is installed, but + cyrus-sasl-plain is not installed. 
+ """ + my_conn_error = QpidException() + my_conn_error.text = 'Error in sasl_client_start (-4) SASL(-4): no '\ + 'mechanism available' + mock_qpid.messaging.Connection.establish.side_effect = my_conn_error + mock_exc_info.return_value = ('a', 'b', None) + try: + self.conn = Connection(**self.connection_options) + except AuthenticationFailure as error: + exc_info = sys.exc_info() + self.assertTrue(not isinstance(error, QpidException)) + self.assertTrue(exc_info[1] is 'b') + self.assertTrue(exc_info[2] is None) + else: + self.fail('ConnectionError type was not mutated correctly') + + @patch(QPID_MODULE + '.ConnectionError', new=(QpidException, )) + @patch(QPID_MODULE + '.sys.exc_info') + @patch(QPID_MODULE + '.qpid') + def test_unknown_connection_error(self, mock_qpid, mock_exc_info): + # If we get a connection error that we don't understand, + # bubble it up as-is + my_conn_error = QpidException(code=999, text='someothertext') + mock_qpid.messaging.Connection.establish.side_effect = my_conn_error + mock_exc_info.return_value = 'a', 'b', None + try: + self.conn = Connection(**self.connection_options) + except Exception as error: + self.assertTrue(error.code == 999) + else: + self.fail('Connection should have thrown an exception') + + @patch.object(Transport, 'channel_errors', new=(QpidException, )) + @patch(QPID_MODULE + '.qpid') + @patch(QPID_MODULE + '.ConnectionError', new=IOError) + def test_non_qpid_error_raises(self, mock_qpid): + mock_Qpid_Connection = mock_qpid.messaging.Connection + my_conn_error = SyntaxError() + my_conn_error.text = 'some non auth related error message' + mock_Qpid_Connection.establish.side_effect = my_conn_error + with self.assertRaises(SyntaxError): + Connection(**self.connection_options) + + @patch(QPID_MODULE + '.qpid') + @patch(QPID_MODULE + '.ConnectionError', new=IOError) + def test_non_auth_conn_error_raises(self, mock_qpid): + mock_Qpid_Connection = mock_qpid.messaging.Connection + my_conn_error = IOError() + my_conn_error.text = 'some non auth related error message' + mock_Qpid_Connection.establish.side_effect = my_conn_error + with self.assertRaises(IOError): + Connection(**self.connection_options) + + +@case_no_python3 +@case_no_pypy +class TestConnectionClassAttributes(ConnectionTestBase): + + def test_connection_verify_class_attributes(self): + self.assertEqual(Channel, Connection.Channel) + + +@case_no_python3 +@case_no_pypy +class TestConnectionGetQpidConnection(ConnectionTestBase): + + def test_connection_get_qpid_connection(self): + self.conn._qpid_conn = Mock() + returned_connection = self.conn.get_qpid_connection() + self.assertIs(self.conn._qpid_conn, returned_connection) + + +@case_no_python3 +@case_no_pypy +class TestConnectionClose(ConnectionTestBase): + + def test_connection_close(self): + self.conn._qpid_conn = Mock() + self.conn.close() + self.conn._qpid_conn.close.assert_called_once_with() + + +@case_no_python3 +@case_no_pypy +class TestConnectionCloseChannel(ConnectionTestBase): + + def setUp(self): + super(TestConnectionCloseChannel, self).setUp() + self.conn.channels = Mock() + + def test_connection_close_channel_removes_channel_from_channel_list(self): + mock_channel = Mock() + self.conn.close_channel(mock_channel) + self.conn.channels.remove.assert_called_once_with(mock_channel) + + def test_connection_close_channel_handles_ValueError_being_raised(self): + self.conn.channels.remove = Mock(side_effect=ValueError()) + self.conn.close_channel(Mock()) + + def test_connection_close_channel_set_channel_connection_to_None(self): + 
mock_channel = Mock() + mock_channel.connection = False + self.conn.channels.remove = Mock(side_effect=ValueError()) + self.conn.close_channel(mock_channel) + self.assertIsNone(mock_channel.connection) + + +@case_no_python3 +@case_no_pypy +class ChannelTestBase(Case): + + def setUp(self): + self.patch_qpidtoollibs = patch(QPID_MODULE + '.qpidtoollibs') + self.mock_qpidtoollibs = self.patch_qpidtoollibs.start() + self.mock_broker_agent = self.mock_qpidtoollibs.BrokerAgent + self.conn = Mock() + self.transport = Mock() + self.channel = Channel(self.conn, self.transport) + + def tearDown(self): + self.patch_qpidtoollibs.stop() + + +@case_no_python3 +@case_no_pypy +class TestChannelPurge(ChannelTestBase): + + def setUp(self): + super(TestChannelPurge, self).setUp() + self.mock_queue = Mock() + + def test_gets_queue(self): + self.channel._purge(self.mock_queue) + getQueue = self.mock_broker_agent.return_value.getQueue + getQueue.assert_called_once_with(self.mock_queue) + + def test_does_not_call_purge_if_message_count_is_zero(self): + values = {'msgDepth': 0} + queue_obj = self.mock_broker_agent.return_value.getQueue.return_value + queue_obj.values = values + self.channel._purge(self.mock_queue) + self.assertFalse(queue_obj.purge.called) + + def test_purges_all_messages_from_queue(self): + values = {'msgDepth': 5} + queue_obj = self.mock_broker_agent.return_value.getQueue.return_value + queue_obj.values = values + self.channel._purge(self.mock_queue) + queue_obj.purge.assert_called_with(5) + + def test_returns_message_count(self): + values = {'msgDepth': 5} + queue_obj = self.mock_broker_agent.return_value.getQueue.return_value + queue_obj.values = values + result = self.channel._purge(self.mock_queue) + self.assertEqual(result, 5) + + @patch(QPID_MODULE + '.NotFound', new=QpidException) + def test_raises_channel_error_if_queue_does_not_exist(self): + self.mock_broker_agent.return_value.getQueue.return_value = None + self.assertRaises(QpidException, self.channel._purge, self.mock_queue) + + +@case_no_python3 +@case_no_pypy +class TestChannelPut(ChannelTestBase): + + @patch(QPID_MODULE + '.qpid') + def test_channel__put_onto_queue(self, mock_qpid): + routing_key = 'routingkey' + mock_message = Mock() + mock_Message_cls = mock_qpid.messaging.Message + + self.channel._put(routing_key, mock_message) + + address_str = '{0}; {{assert: always, node: {{type: queue}}}}'.format( + routing_key, + ) + self.transport.session.sender.assert_called_with(address_str) + mock_Message_cls.assert_called_with( + content=mock_message, subject=None, + ) + mock_sender = self.transport.session.sender.return_value + mock_sender.send.assert_called_with( + mock_Message_cls.return_value, sync=True, + ) + mock_sender.close.assert_called_with() + + @patch(QPID_MODULE + '.qpid') + def test_channel__put_onto_exchange(self, mock_qpid): + mock_routing_key = 'routingkey' + mock_exchange_name = 'myexchange' + mock_message = Mock() + mock_Message_cls = mock_qpid.messaging.Message + + self.channel._put(mock_routing_key, mock_message, mock_exchange_name) + + addrstr = '{0}/{1}; {{assert: always, node: {{type: topic}}}}'.format( + mock_exchange_name, mock_routing_key, + ) + self.transport.session.sender.assert_called_with(addrstr) + mock_Message_cls.assert_called_with( + content=mock_message, subject=mock_routing_key, + ) + mock_sender = self.transport.session.sender.return_value + mock_sender.send.assert_called_with( + mock_Message_cls.return_value, sync=True, + ) + mock_sender.close.assert_called_with() + + +@case_no_python3 
+@case_no_pypy +class TestChannelGet(ChannelTestBase): + + def test_channel__get(self): + mock_queue = Mock() + + result = self.channel._get(mock_queue) + + self.transport.session.receiver.assert_called_once_with(mock_queue) + mock_rx = self.transport.session.receiver.return_value + mock_rx.fetch.assert_called_once_with(timeout=0) + mock_rx.close.assert_called_once_with() + self.assertIs(mock_rx.fetch.return_value, result) + + +@case_no_python3 +@case_no_pypy +class TestChannelClose(ChannelTestBase): + + def setUp(self): + super(TestChannelClose, self).setUp() + self.patch_basic_cancel = patch.object(self.channel, 'basic_cancel') + self.mock_basic_cancel = self.patch_basic_cancel.start() + self.mock_receiver1 = Mock() + self.mock_receiver2 = Mock() + self.channel._receivers = { + 1: self.mock_receiver1, 2: self.mock_receiver2, + } + self.channel.closed = False + + def tearDown(self): + self.patch_basic_cancel.stop() + super(TestChannelClose, self).tearDown() + + def test_channel_close_sets_close_attribute(self): + self.channel.close() + self.assertTrue(self.channel.closed) + + def test_channel_close_calls_basic_cancel_on_all_receivers(self): + self.channel.close() + self.mock_basic_cancel.assert_has_calls([call(1), call(2)]) + + def test_channel_close_calls_close_channel_on_connection(self): + self.channel.close() + self.conn.close_channel.assert_called_once_with(self.channel) + + def test_channel_close_calls_close_on_broker_agent(self): + self.channel.close() + self.channel._broker.close.assert_called_once_with() + + def test_channel_close_does_nothing_if_already_closed(self): + self.channel.closed = True + self.channel.close() + self.assertFalse(self.mock_basic_cancel.called) + + def test_channel_close_does_not_call_close_channel_if_conn_is_None(self): + self.channel.connection = None + self.channel.close() + self.assertFalse(self.conn.close_channel.called) + + +@case_no_python3 +@case_no_pypy +class TestChannelBasicQoS(ChannelTestBase): + + def test_channel_basic_qos_always_returns_one(self): + self.channel.basic_qos(2) + self.assertEqual(self.channel.qos.prefetch_count, 1) + + +@case_no_python3 +@case_no_pypy +class TestChannelBasicGet(ChannelTestBase): + + def setUp(self): + super(TestChannelBasicGet, self).setUp() + self.channel.Message = Mock() + self.channel._get = Mock() + + def test_channel_basic_get_calls__get_with_queue(self): + mock_queue = Mock() + self.channel.basic_get(mock_queue) + self.channel._get.assert_called_once_with(mock_queue) + + def test_channel_basic_get_creates_Message_correctly(self): + mock_queue = Mock() + self.channel.basic_get(mock_queue) + mock_raw_message = self.channel._get.return_value.content + self.channel.Message.assert_called_once_with( + self.channel, mock_raw_message, + ) + + def test_channel_basic_get_acknowledges_message_by_default(self): + mock_queue = Mock() + self.channel.basic_get(mock_queue) + mock_qpid_message = self.channel._get.return_value + acknowledge = self.transport.session.acknowledge + acknowledge.assert_called_once_with(message=mock_qpid_message) + + def test_channel_basic_get_acknowledges_message_with_no_ack_False(self): + mock_queue = Mock() + self.channel.basic_get(mock_queue, no_ack=False) + mock_qpid_message = self.channel._get.return_value + acknowledge = self.transport.session.acknowledge + acknowledge.assert_called_once_with(message=mock_qpid_message) + + def test_channel_basic_get_acknowledges_message_with_no_ack_True(self): + mock_queue = Mock() + self.channel.basic_get(mock_queue, no_ack=True) + mock_qpid_message = 
self.channel._get.return_value + acknowledge = self.transport.session.acknowledge + acknowledge.assert_called_once_with(message=mock_qpid_message) + + def test_channel_basic_get_returns_correct_message(self): + mock_queue = Mock() + basic_get_result = self.channel.basic_get(mock_queue) + expected_message = self.channel.Message.return_value + self.assertIs(expected_message, basic_get_result) + + def test_basic_get_returns_None_when_channel__get_raises_Empty(self): + mock_queue = Mock() + self.channel._get = Mock(side_effect=Empty) + basic_get_result = self.channel.basic_get(mock_queue) + self.assertEqual(self.channel.Message.call_count, 0) + self.assertIsNone(basic_get_result) + + +@case_no_python3 +@case_no_pypy +class TestChannelBasicCancel(ChannelTestBase): + + def setUp(self): + super(TestChannelBasicCancel, self).setUp() + self.channel._receivers = {1: Mock()} + + def test_channel_basic_cancel_no_error_if_consumer_tag_not_found(self): + self.channel.basic_cancel(2) + + def test_channel_basic_cancel_pops_receiver(self): + self.channel.basic_cancel(1) + self.assertNotIn(1, self.channel._receivers) + + def test_channel_basic_cancel_closes_receiver(self): + mock_receiver = self.channel._receivers[1] + self.channel.basic_cancel(1) + mock_receiver.close.assert_called_once_with() + + def test_channel_basic_cancel_pops__tag_to_queue(self): + self.channel._tag_to_queue = Mock() + self.channel.basic_cancel(1) + self.channel._tag_to_queue.pop.assert_called_once_with(1, None) + + def test_channel_basic_cancel_pops_connection__callbacks(self): + self.channel._tag_to_queue = Mock() + self.channel.basic_cancel(1) + mock_queue = self.channel._tag_to_queue.pop.return_value + self.conn._callbacks.pop.assert_called_once_with(mock_queue, None) + + +@case_no_python3 +@case_no_pypy +class TestChannelInit(ChannelTestBase, ExtraAssertionsMixin): + + def test_channel___init__sets_variables_as_expected(self): + self.assertIs(self.conn, self.channel.connection) + self.assertIs(self.transport, self.channel.transport) + self.assertFalse(self.channel.closed) + self.conn.get_qpid_connection.assert_called_once_with() + expected_broker_agent = self.mock_broker_agent.return_value + self.assertIs(self.channel._broker, expected_broker_agent) + self.assertDictEqual(self.channel._tag_to_queue, {}) + self.assertDictEqual(self.channel._receivers, {}) + self.assertIs(self.channel._qos, None) + + +@case_no_python3 +@case_no_pypy +class TestChannelBasicConsume(ChannelTestBase, ExtraAssertionsMixin): + + def setUp(self): + super(TestChannelBasicConsume, self).setUp() + self.conn._callbacks = {} + + def test_channel_basic_consume_adds_queue_to__tag_to_queue(self): + mock_tag = Mock() + mock_queue = Mock() + self.channel.basic_consume(mock_queue, Mock(), Mock(), mock_tag) + expected_dict = {mock_tag: mock_queue} + self.assertDictEqual(expected_dict, self.channel._tag_to_queue) + + def test_channel_basic_consume_adds_entry_to_connection__callbacks(self): + mock_queue = Mock() + self.channel.basic_consume(mock_queue, Mock(), Mock(), Mock()) + self.assertIn(mock_queue, self.conn._callbacks) + self.assertIsInstance(self.conn._callbacks[mock_queue], Callable) + + def test_channel_basic_consume_creates_new_receiver(self): + mock_queue = Mock() + self.channel.basic_consume(mock_queue, Mock(), Mock(), Mock()) + self.transport.session.receiver.assert_called_once_with(mock_queue) + + def test_channel_basic_consume_saves_new_receiver(self): + mock_tag = Mock() + self.channel.basic_consume(Mock(), Mock(), Mock(), mock_tag) + new_mock_receiver 
= self.transport.session.receiver.return_value + expected_dict = {mock_tag: new_mock_receiver} + self.assertDictEqual(expected_dict, self.channel._receivers) + + def test_channel_basic_consume_sets_capacity_on_new_receiver(self): + mock_prefetch_count = Mock() + self.channel.qos.prefetch_count = mock_prefetch_count + self.channel.basic_consume(Mock(), Mock(), Mock(), Mock()) + new_receiver = self.transport.session.receiver.return_value + self.assertTrue(new_receiver.capacity is mock_prefetch_count) + + def get_callback(self, no_ack=Mock(), original_cb=Mock()): + self.channel.Message = Mock() + mock_queue = Mock() + self.channel.basic_consume(mock_queue, no_ack, original_cb, Mock()) + return self.conn._callbacks[mock_queue] + + def test_channel_basic_consume_callback_creates_Message_correctly(self): + callback = self.get_callback() + mock_qpid_message = Mock() + callback(mock_qpid_message) + mock_content = mock_qpid_message.content + self.channel.Message.assert_called_once_with( + self.channel, mock_content, + ) + + def test_channel_basic_consume_callback_adds_message_to_QoS(self): + self.channel._qos = Mock() + callback = self.get_callback() + mock_qpid_message = Mock() + callback(mock_qpid_message) + mock_delivery_tag = self.channel.Message.return_value.delivery_tag + self.channel._qos.append.assert_called_once_with( + mock_qpid_message, mock_delivery_tag, + ) + + def test_channel_basic_consume_callback_gratuitously_acks(self): + self.channel.basic_ack = Mock() + callback = self.get_callback() + mock_qpid_message = Mock() + callback(mock_qpid_message) + mock_delivery_tag = self.channel.Message.return_value.delivery_tag + self.channel.basic_ack.assert_called_once_with(mock_delivery_tag) + + def test_channel_basic_consume_callback_does_not_ack_when_needed(self): + self.channel.basic_ack = Mock() + callback = self.get_callback(no_ack=False) + mock_qpid_message = Mock() + callback(mock_qpid_message) + self.assertFalse(self.channel.basic_ack.called) + + def test_channel_basic_consume_callback_calls_real_callback(self): + self.channel.basic_ack = Mock() + mock_original_callback = Mock() + callback = self.get_callback(original_cb=mock_original_callback) + mock_qpid_message = Mock() + callback(mock_qpid_message) + expected_message = self.channel.Message.return_value + mock_original_callback.assert_called_once_with(expected_message) + + +@case_no_python3 +@case_no_pypy +class TestChannelQueueDelete(ChannelTestBase): + + def setUp(self): + super(TestChannelQueueDelete, self).setUp() + self.patch__has_queue = patch.object(self.channel, '_has_queue') + self.mock__has_queue = self.patch__has_queue.start() + self.patch__size = patch.object(self.channel, '_size') + self.mock__size = self.patch__size.start() + self.patch__delete = patch.object(self.channel, '_delete') + self.mock__delete = self.patch__delete.start() + self.mock_queue = Mock() + + def tearDown(self): + self.patch__has_queue.stop() + self.patch__size.stop() + self.patch__delete.stop() + super(TestChannelQueueDelete, self).tearDown() + + def test_checks_if_queue_exists(self): + self.channel.queue_delete(self.mock_queue) + self.mock__has_queue.assert_called_once_with(self.mock_queue) + + def test_does_nothing_if_queue_does_not_exist(self): + self.mock__has_queue.return_value = False + self.channel.queue_delete(self.mock_queue) + self.assertFalse(self.mock__delete.called) + + def test_not_empty_and_if_empty_True_no_delete(self): + self.mock__size.return_value = 1 + self.channel.queue_delete(self.mock_queue, if_empty=True) + mock_broker = 
self.mock_broker_agent.return_value + self.assertFalse(mock_broker.getQueue.called) + + def test_calls_get_queue(self): + self.channel.queue_delete(self.mock_queue) + getQueue = self.mock_broker_agent.return_value.getQueue + getQueue.assert_called_once_with(self.mock_queue) + + def test_gets_queue_attribute(self): + self.channel.queue_delete(self.mock_queue) + queue_obj = self.mock_broker_agent.return_value.getQueue.return_value + queue_obj.getAttributes.assert_called_once_with() + + def test_queue_in_use_and_if_unused_no_delete(self): + queue_obj = self.mock_broker_agent.return_value.getQueue.return_value + queue_obj.getAttributes.return_value = {'consumerCount': 1} + self.channel.queue_delete(self.mock_queue, if_unused=True) + self.assertFalse(self.mock__delete.called) + + def test_calls__delete_with_queue(self): + self.channel.queue_delete(self.mock_queue) + self.mock__delete.assert_called_once_with(self.mock_queue) + + +@case_no_python3 +@case_no_pypy +class TestChannel(ExtraAssertionsMixin, Case): + + @patch(QPID_MODULE + '.qpidtoollibs') + def setUp(self, mock_qpidtoollibs): + self.mock_connection = Mock() + self.mock_qpid_connection = Mock() + self.mock_qpid_session = Mock() + self.mock_qpid_connection.session = Mock( + return_value=self.mock_qpid_session, + ) + self.mock_connection.get_qpid_connection = Mock( + return_value=self.mock_qpid_connection, + ) + self.mock_transport = Mock() + self.mock_broker = Mock() + self.mock_Message = Mock() + self.mock_BrokerAgent = mock_qpidtoollibs.BrokerAgent + self.mock_BrokerAgent.return_value = self.mock_broker + self.my_channel = Channel( + self.mock_connection, self.mock_transport, + ) + self.my_channel.Message = self.mock_Message + + def test_verify_QoS_class_attribute(self): + """Verify that the class attribute QoS refers to the QoS object""" + self.assertIs(QoS, Channel.QoS) + + def test_verify_Message_class_attribute(self): + """Verify that the class attribute Message refers to the Message + object.""" + self.assertIs(Message, Channel.Message) + + def test_body_encoding_class_attribute(self): + """Verify that the class attribute body_encoding is set to base64""" + self.assertEqual('base64', Channel.body_encoding) + + def test_codecs_class_attribute(self): + """Verify that the codecs class attribute has a correct key and + value.""" + self.assertIsInstance(Channel.codecs, dict) + self.assertIn('base64', Channel.codecs) + self.assertIsInstance(Channel.codecs['base64'], Base64) + + def test_size(self): + """Test getting the number of messages in a queue specified by + name and returning them.""" + message_count = 5 + mock_queue = Mock() + mock_queue_to_check = Mock() + mock_queue_to_check.values = {'msgDepth': message_count} + self.mock_broker.getQueue.return_value = mock_queue_to_check + result = self.my_channel._size(mock_queue) + self.mock_broker.getQueue.assert_called_with(mock_queue) + self.assertEqual(message_count, result) + + def test_delete(self): + """Test deleting a queue calls purge and delQueue with queue name.""" + mock_queue = Mock() + self.my_channel._purge = Mock() + result = self.my_channel._delete(mock_queue) + self.my_channel._purge.assert_called_with(mock_queue) + self.mock_broker.delQueue.assert_called_with(mock_queue) + self.assertIsNone(result) + + def test_has_queue_true(self): + """Test checking if a queue exists, and it does.""" + mock_queue = Mock() + self.mock_broker.getQueue.return_value = True + result = self.my_channel._has_queue(mock_queue) + self.assertTrue(result) + + def test_has_queue_false(self): + 
"""Test checking if a queue exists, and it does not.""" + mock_queue = Mock() + self.mock_broker.getQueue.return_value = False + result = self.my_channel._has_queue(mock_queue) + self.assertFalse(result) + + @patch('amqp.protocol.queue_declare_ok_t') + def test_queue_declare_with_exception_raised(self, + mock_queue_declare_ok_t): + """Test declare_queue, where an exception is raised and silenced.""" + mock_queue = Mock() + mock_passive = Mock() + mock_durable = Mock() + mock_exclusive = Mock() + mock_auto_delete = Mock() + mock_nowait = Mock() + mock_arguments = Mock() + mock_msg_count = Mock() + mock_queue.startswith.return_value = False + mock_queue.endswith.return_value = False + options = { + 'passive': mock_passive, + 'durable': mock_durable, + 'exclusive': mock_exclusive, + 'auto-delete': mock_auto_delete, + 'arguments': mock_arguments, + } + mock_consumer_count = Mock() + mock_return_value = Mock() + values_dict = { + 'msgDepth': mock_msg_count, + 'consumerCount': mock_consumer_count, + } + mock_queue_data = Mock() + mock_queue_data.values = values_dict + exception_to_raise = Exception('The foo object already exists.') + self.mock_broker.addQueue.side_effect = exception_to_raise + self.mock_broker.getQueue.return_value = mock_queue_data + mock_queue_declare_ok_t.return_value = mock_return_value + result = self.my_channel.queue_declare( + mock_queue, + passive=mock_passive, + durable=mock_durable, + exclusive=mock_exclusive, + auto_delete=mock_auto_delete, + nowait=mock_nowait, + arguments=mock_arguments, + ) + self.mock_broker.addQueue.assert_called_with( + mock_queue, options=options, + ) + mock_queue_declare_ok_t.assert_called_with( + mock_queue, mock_msg_count, mock_consumer_count, + ) + self.assertIs(mock_return_value, result) + + def test_queue_declare_set_ring_policy_for_celeryev(self): + """Test declare_queue sets ring_policy for celeryev.""" + mock_queue = Mock() + mock_queue.startswith.return_value = True + mock_queue.endswith.return_value = False + expected_default_options = { + 'passive': False, + 'durable': False, + 'exclusive': False, + 'auto-delete': True, + 'arguments': None, + 'qpid.policy_type': 'ring', + } + mock_msg_count = Mock() + mock_consumer_count = Mock() + values_dict = { + 'msgDepth': mock_msg_count, + 'consumerCount': mock_consumer_count, + } + mock_queue_data = Mock() + mock_queue_data.values = values_dict + self.mock_broker.addQueue.return_value = None + self.mock_broker.getQueue.return_value = mock_queue_data + self.my_channel.queue_declare(mock_queue) + mock_queue.startswith.assert_called_with('celeryev') + self.mock_broker.addQueue.assert_called_with( + mock_queue, options=expected_default_options, + ) + + def test_queue_declare_set_ring_policy_for_pidbox(self): + """Test declare_queue sets ring_policy for pidbox.""" + mock_queue = Mock() + mock_queue.startswith.return_value = False + mock_queue.endswith.return_value = True + expected_default_options = { + 'passive': False, + 'durable': False, + 'exclusive': False, + 'auto-delete': True, + 'arguments': None, + 'qpid.policy_type': 'ring', + } + mock_msg_count = Mock() + mock_consumer_count = Mock() + values_dict = { + 'msgDepth': mock_msg_count, + 'consumerCount': mock_consumer_count, + } + mock_queue_data = Mock() + mock_queue_data.values = values_dict + self.mock_broker.addQueue.return_value = None + self.mock_broker.getQueue.return_value = mock_queue_data + self.my_channel.queue_declare(mock_queue) + mock_queue.endswith.assert_called_with('pidbox') + self.mock_broker.addQueue.assert_called_with( + 
mock_queue, options=expected_default_options, + ) + + def test_queue_declare_ring_policy_not_set_as_expected(self): + """Test declare_queue does not set ring_policy as expected.""" + mock_queue = Mock() + mock_queue.startswith.return_value = False + mock_queue.endswith.return_value = False + expected_default_options = { + 'passive': False, + 'durable': False, + 'exclusive': False, + 'auto-delete': True, + 'arguments': None, + } + mock_msg_count = Mock() + mock_consumer_count = Mock() + values_dict = { + 'msgDepth': mock_msg_count, + 'consumerCount': mock_consumer_count, + } + mock_queue_data = Mock() + mock_queue_data.values = values_dict + self.mock_broker.addQueue.return_value = None + self.mock_broker.getQueue.return_value = mock_queue_data + self.my_channel.queue_declare(mock_queue) + mock_queue.startswith.assert_called_with('celeryev') + mock_queue.endswith.assert_called_with('pidbox') + self.mock_broker.addQueue.assert_called_with( + mock_queue, options=expected_default_options, + ) + + def test_queue_declare_test_defaults(self): + """Test declare_queue defaults.""" + mock_queue = Mock() + mock_queue.startswith.return_value = False + mock_queue.endswith.return_value = False + expected_default_options = { + 'passive': False, + 'durable': False, + 'exclusive': False, + 'auto-delete': True, + 'arguments': None, + } + mock_msg_count = Mock() + mock_consumer_count = Mock() + values_dict = { + 'msgDepth': mock_msg_count, + 'consumerCount': mock_consumer_count, + } + mock_queue_data = Mock() + mock_queue_data.values = values_dict + self.mock_broker.addQueue.return_value = None + self.mock_broker.getQueue.return_value = mock_queue_data + self.my_channel.queue_declare(mock_queue) + self.mock_broker.addQueue.assert_called_with( + mock_queue, + options=expected_default_options, + ) + + def test_queue_declare_raises_exception_not_silenced(self): + unique_exception = Exception('This exception should not be silenced') + mock_queue = Mock() + self.mock_broker.addQueue.side_effect = unique_exception + with self.assertRaises(unique_exception.__class__): + self.my_channel.queue_declare(mock_queue) + self.mock_broker.addQueue.assert_called_once_with( + mock_queue, + options={ + 'exclusive': False, + 'durable': False, + 'qpid.policy_type': 'ring', + 'passive': False, + 'arguments': None, + 'auto-delete': True + }) + + def test_exchange_declare_raises_exception_and_silenced(self): + """Create exchange where an exception is raised and then silenced""" + self.mock_broker.addExchange.side_effect = Exception( + 'The foo object already exists.', + ) + self.my_channel.exchange_declare() + + def test_exchange_declare_raises_exception_not_silenced(self): + """Create Exchange where an exception is raised and not silenced.""" + unique_exception = Exception('This exception should not be silenced') + self.mock_broker.addExchange.side_effect = unique_exception + with self.assertRaises(unique_exception.__class__): + self.my_channel.exchange_declare() + + def test_exchange_declare(self): + """Create Exchange where an exception is NOT raised.""" + mock_exchange = Mock() + mock_type = Mock() + mock_durable = Mock() + options = {'durable': mock_durable} + result = self.my_channel.exchange_declare( + mock_exchange, mock_type, mock_durable, + ) + self.mock_broker.addExchange.assert_called_with( + mock_type, mock_exchange, options, + ) + self.assertIsNone(result) + + def test_exchange_delete(self): + """Test the deletion of an exchange by name.""" + mock_exchange = Mock() + result = 
self.my_channel.exchange_delete(mock_exchange) + self.mock_broker.delExchange.assert_called_with(mock_exchange) + self.assertIsNone(result) + + def test_queue_bind(self): + """Test binding a queue to an exchange using a routing key.""" + mock_queue = Mock() + mock_exchange = Mock() + mock_routing_key = Mock() + self.my_channel.queue_bind( + mock_queue, mock_exchange, mock_routing_key, + ) + self.mock_broker.bind.assert_called_with( + mock_exchange, mock_queue, mock_routing_key, + ) + + def test_queue_unbind(self): + """Test unbinding a queue from an exchange using a routing key.""" + mock_queue = Mock() + mock_exchange = Mock() + mock_routing_key = Mock() + self.my_channel.queue_unbind( + mock_queue, mock_exchange, mock_routing_key, + ) + self.mock_broker.unbind.assert_called_with( + mock_exchange, mock_queue, mock_routing_key, + ) + + def test_queue_purge(self): + """Test purging a queue by name.""" + mock_queue = Mock() + purge_result = Mock() + self.my_channel._purge = Mock(return_value=purge_result) + result = self.my_channel.queue_purge(mock_queue) + self.my_channel._purge.assert_called_with(mock_queue) + self.assertIs(purge_result, result) + + @patch(QPID_MODULE + '.Channel.qos') + def test_basic_ack(self, mock_qos): + """Test that basic_ack calls the QoS object properly.""" + mock_delivery_tag = Mock() + self.my_channel.basic_ack(mock_delivery_tag) + mock_qos.ack.assert_called_with(mock_delivery_tag) + + @patch(QPID_MODULE + '.Channel.qos') + def test_basic_reject(self, mock_qos): + """Test that basic_reject calls the QoS object properly.""" + mock_delivery_tag = Mock() + mock_requeue_value = Mock() + self.my_channel.basic_reject(mock_delivery_tag, mock_requeue_value) + mock_qos.reject.assert_called_with( + mock_delivery_tag, requeue=mock_requeue_value, + ) + + def test_qos_manager_is_none(self): + """Test the qos property if the QoS object did not already exist.""" + self.my_channel._qos = None + result = self.my_channel.qos + self.assertIsInstance(result, QoS) + self.assertEqual(result, self.my_channel._qos) + + def test_qos_manager_already_exists(self): + """Test the qos property if the QoS object already exists.""" + mock_existing_qos = Mock() + self.my_channel._qos = mock_existing_qos + result = self.my_channel.qos + self.assertIs(mock_existing_qos, result) + + def test_prepare_message(self): + """Test that prepare_message() returns the correct result.""" + mock_body = Mock() + mock_priority = Mock() + mock_content_encoding = Mock() + mock_content_type = Mock() + mock_header1 = Mock() + mock_header2 = Mock() + mock_properties1 = Mock() + mock_properties2 = Mock() + headers = {'header1': mock_header1, 'header2': mock_header2} + properties = {'properties1': mock_properties1, + 'properties2': mock_properties2} + result = self.my_channel.prepare_message( + mock_body, + priority=mock_priority, + content_type=mock_content_type, + content_encoding=mock_content_encoding, + headers=headers, + properties=properties) + self.assertIs(mock_body, result['body']) + self.assertIs(mock_content_encoding, result['content-encoding']) + self.assertIs(mock_content_type, result['content-type']) + self.assertDictEqual(headers, result['headers']) + self.assertDictContainsSubset(properties, result['properties']) + self.assertIs( + mock_priority, result['properties']['delivery_info']['priority'], + ) + + @patch('__builtin__.buffer') + @patch(QPID_MODULE + '.Channel.body_encoding') + @patch(QPID_MODULE + '.Channel.encode_body') + @patch(QPID_MODULE + '.Channel._put') + def test_basic_publish(self, 
mock_put, + mock_encode_body, + mock_body_encoding, + mock_buffer): + """Test basic_publish().""" + mock_original_body = Mock() + mock_encoded_body = 'this is my encoded body' + mock_message = {'body': mock_original_body, + 'properties': {'delivery_info': {}}} + mock_encode_body.return_value = ( + mock_encoded_body, mock_body_encoding, + ) + mock_exchange = Mock() + mock_routing_key = Mock() + mock_encoded_buffered_body = Mock() + mock_buffer.return_value = mock_encoded_buffered_body + self.my_channel.basic_publish( + mock_message, mock_exchange, mock_routing_key, + ) + mock_encode_body.assert_called_once_with( + mock_original_body, mock_body_encoding, + ) + mock_buffer.assert_called_once_with(mock_encoded_body) + self.assertIs(mock_message['body'], mock_encoded_buffered_body) + self.assertIs( + mock_message['properties']['body_encoding'], mock_body_encoding, + ) + self.assertIsInstance( + mock_message['properties']['delivery_tag'], uuid.UUID, + ) + self.assertIs( + mock_message['properties']['delivery_info']['exchange'], + mock_exchange, + ) + self.assertIs( + mock_message['properties']['delivery_info']['routing_key'], + mock_routing_key, + ) + mock_put.assert_called_with( + mock_routing_key, mock_message, mock_exchange, + ) + + @patch(QPID_MODULE + '.Channel.codecs') + def test_encode_body_expected_encoding(self, mock_codecs): + """Test if encode_body() works when encoding is set correctly""" + mock_body = Mock() + mock_encoder = Mock() + mock_encoded_result = Mock() + mock_codecs.get.return_value = mock_encoder + mock_encoder.encode.return_value = mock_encoded_result + result = self.my_channel.encode_body(mock_body, encoding='base64') + expected_result = (mock_encoded_result, 'base64') + self.assertEqual(expected_result, result) + + @patch(QPID_MODULE + '.Channel.codecs') + def test_encode_body_not_expected_encoding(self, mock_codecs): + """Test if encode_body() works when encoding is not set correctly.""" + mock_body = Mock() + result = self.my_channel.encode_body(mock_body, encoding=None) + expected_result = mock_body, None + self.assertEqual(expected_result, result) + + @patch(QPID_MODULE + '.Channel.codecs') + def test_decode_body_expected_encoding(self, mock_codecs): + """Test if decode_body() works when encoding is set correctly.""" + mock_body = Mock() + mock_decoder = Mock() + mock_decoded_result = Mock() + mock_codecs.get.return_value = mock_decoder + mock_decoder.decode.return_value = mock_decoded_result + result = self.my_channel.decode_body(mock_body, encoding='base64') + self.assertEqual(mock_decoded_result, result) + + @patch(QPID_MODULE + '.Channel.codecs') + def test_decode_body_not_expected_encoding(self, mock_codecs): + """Test if decode_body() works when encoding is not set correctly.""" + mock_body = Mock() + result = self.my_channel.decode_body(mock_body, encoding=None) + self.assertEqual(mock_body, result) + + def test_typeof_exchange_exists(self): + """Test that typeof() finds an exchange that already exists.""" + mock_exchange = Mock() + mock_qpid_exchange = Mock() + mock_attributes = {} + mock_type = Mock() + mock_attributes['type'] = mock_type + mock_qpid_exchange.getAttributes.return_value = mock_attributes + self.mock_broker.getExchange.return_value = mock_qpid_exchange + result = self.my_channel.typeof(mock_exchange) + self.assertIs(mock_type, result) + + def test_typeof_exchange_does_not_exist(self): + """Test that typeof() finds an exchange that does not exists.""" + mock_exchange = Mock() + mock_default = Mock() + self.mock_broker.getExchange.return_value 
= None + result = self.my_channel.typeof(mock_exchange, default=mock_default) + self.assertIs(mock_default, result) + + +@case_no_python3 +@case_no_pypy +@disable_runtime_dependency_check +class TestTransportInit(Case): + + def setUp(self): + self.patch_a = patch.object(Transport, 'verify_runtime_environment') + self.mock_verify_runtime_environment = self.patch_a.start() + + self.patch_b = patch(QPID_MODULE + '.base.Transport.__init__') + self.mock_base_Transport__init__ = self.patch_b.start() + + def tearDown(self): + self.patch_a.stop() + self.patch_b.stop() + + def test_Transport___init___calls_verify_runtime_environment(self): + Transport(Mock()) + self.mock_verify_runtime_environment.assert_called_once_with() + + def test_transport___init___calls_parent_class___init__(self): + m = Mock() + Transport(m) + self.mock_base_Transport__init__.assert_called_once_with(m) + + def test_transport___init___sets_use_async_interface_False(self): + transport = Transport(Mock()) + self.assertFalse(transport.use_async_interface) + + +@case_no_python3 +@case_no_pypy +@disable_runtime_dependency_check +class TestTransportDrainEvents(Case): + + def setUp(self): + self.transport = Transport(Mock()) + self.transport.session = Mock() + self.mock_queue = Mock() + self.mock_message = Mock() + self.mock_conn = Mock() + self.mock_callback = Mock() + self.mock_conn._callbacks = {self.mock_queue: self.mock_callback} + + def mock_next_receiver(self, timeout): + time.sleep(0.3) + mock_receiver = Mock() + mock_receiver.source = self.mock_queue + mock_receiver.fetch.return_value = self.mock_message + return mock_receiver + + def test_socket_timeout_raised_when_all_receivers_empty(self): + with patch(QPID_MODULE + '.QpidEmpty', new=QpidException): + self.transport.session.next_receiver.side_effect = QpidException() + with self.assertRaises(socket.timeout): + self.transport.drain_events(Mock()) + + def test_socket_timeout_raised_when_by_timeout(self): + self.transport.session.next_receiver = self.mock_next_receiver + with self.assertRaises(socket.timeout): + self.transport.drain_events(self.mock_conn, timeout=1) + + def test_timeout_returns_no_earlier_then_asked_for(self): + self.transport.session.next_receiver = self.mock_next_receiver + start_time = monotonic() + try: + self.transport.drain_events(self.mock_conn, timeout=1) + except socket.timeout: + pass + elapsed_time_in_s = monotonic() - start_time + self.assertGreaterEqual(elapsed_time_in_s, 1.0) + + def test_callback_is_called(self): + self.transport.session.next_receiver = self.mock_next_receiver + try: + self.transport.drain_events(self.mock_conn, timeout=1) + except socket.timeout: + pass + self.mock_callback.assert_called_with(self.mock_message) + + +@case_no_python3 +@case_no_pypy +@disable_runtime_dependency_check +class TestTransportCreateChannel(Case): + + def setUp(self): + self.transport = Transport(Mock()) + self.mock_conn = Mock() + self.mock_new_channel = Mock() + self.mock_conn.Channel.return_value = self.mock_new_channel + self.returned_channel = self.transport.create_channel(self.mock_conn) + + def test_new_channel_created_from_connection(self): + self.assertIs(self.mock_new_channel, self.returned_channel) + self.mock_conn.Channel.assert_called_with( + self.mock_conn, self.transport, + ) + + def test_new_channel_added_to_connection_channel_list(self): + append_method = self.mock_conn.channels.append + append_method.assert_called_with(self.mock_new_channel) + + +@case_no_python3 +@case_no_pypy +@disable_runtime_dependency_check +class 
TestTransportEstablishConnection(Case): + + def setUp(self): + + class MockClient(object): + pass + + self.client = MockClient() + self.client.connect_timeout = 4 + self.client.ssl = False + self.client.transport_options = {} + self.client.userid = None + self.client.password = None + self.client.login_method = None + self.transport = Transport(self.client) + self.mock_conn = Mock() + self.transport.Connection = self.mock_conn + + def test_transport_establish_conn_new_option_overwrites_default(self): + self.client.userid = 'new-userid' + self.client.password = 'new-password' + self.transport.establish_connection() + self.mock_conn.assert_called_once_with( + username=self.client.userid, + password=self.client.password, + sasl_mechanisms='PLAIN', + host='localhost', + timeout=4, + port=5672, + transport='tcp', + ) + + def test_transport_establish_conn_empty_client_is_default(self): + self.transport.establish_connection() + self.mock_conn.assert_called_once_with( + sasl_mechanisms='ANONYMOUS', + host='localhost', + timeout=4, + port=5672, + transport='tcp', + ) + + def test_transport_establish_conn_additional_transport_option(self): + new_param_value = 'mynewparam' + self.client.transport_options['new_param'] = new_param_value + self.transport.establish_connection() + self.mock_conn.assert_called_once_with( + sasl_mechanisms='ANONYMOUS', + host='localhost', + timeout=4, + new_param=new_param_value, + port=5672, + transport='tcp', + ) + + def test_transport_establish_conn_transform_localhost_to_127_0_0_1(self): + self.client.hostname = 'localhost' + self.transport.establish_connection() + self.mock_conn.assert_called_once_with( + sasl_mechanisms='ANONYMOUS', + host='localhost', + timeout=4, + port=5672, + transport='tcp', + ) + + def test_transport_password_no_userid_raises_exception(self): + self.client.password = 'somepass' + self.assertRaises(Exception, self.transport.establish_connection) + + def test_transport_userid_no_password_raises_exception(self): + self.client.userid = 'someusername' + self.assertRaises(Exception, self.transport.establish_connection) + + def test_transport_overrides_sasl_mech_from_login_method(self): + self.client.login_method = 'EXTERNAL' + self.transport.establish_connection() + self.mock_conn.assert_called_once_with( + sasl_mechanisms='EXTERNAL', + host='localhost', + timeout=4, + port=5672, + transport='tcp', + ) + + def test_transport_overrides_sasl_mech_has_username(self): + self.client.userid = 'new-userid' + self.client.login_method = 'EXTERNAL' + self.transport.establish_connection() + self.mock_conn.assert_called_once_with( + username=self.client.userid, + sasl_mechanisms='EXTERNAL', + host='localhost', + timeout=4, + port=5672, + transport='tcp', + ) + + def test_transport_establish_conn_set_password(self): + self.client.userid = 'someuser' + self.client.password = 'somepass' + self.transport.establish_connection() + self.mock_conn.assert_called_once_with( + username='someuser', + password='somepass', + sasl_mechanisms='PLAIN', + host='localhost', + timeout=4, + port=5672, + transport='tcp', + ) + + def test_transport_establish_conn_no_ssl_sets_transport_tcp(self): + self.client.ssl = False + self.transport.establish_connection() + self.mock_conn.assert_called_once_with( + sasl_mechanisms='ANONYMOUS', + host='localhost', + timeout=4, + port=5672, + transport='tcp', + ) + + def test_transport_establish_conn_with_ssl_with_hostname_check(self): + self.client.ssl = { + 'keyfile': 'my_keyfile', + 'certfile': 'my_certfile', + 'ca_certs': 'my_cacerts', + 
'cert_reqs': ssl.CERT_REQUIRED, + } + self.transport.establish_connection() + self.mock_conn.assert_called_once_with( + ssl_certfile='my_certfile', + ssl_trustfile='my_cacerts', + timeout=4, + ssl_skip_hostname_check=False, + sasl_mechanisms='ANONYMOUS', + host='localhost', + ssl_keyfile='my_keyfile', + port=5672, transport='ssl', + ) + + def test_transport_establish_conn_with_ssl_skip_hostname_check(self): + self.client.ssl = { + 'keyfile': 'my_keyfile', + 'certfile': 'my_certfile', + 'ca_certs': 'my_cacerts', + 'cert_reqs': ssl.CERT_OPTIONAL, + } + self.transport.establish_connection() + self.mock_conn.assert_called_once_with( + ssl_certfile='my_certfile', + ssl_trustfile='my_cacerts', + timeout=4, + ssl_skip_hostname_check=True, + sasl_mechanisms='ANONYMOUS', + host='localhost', + ssl_keyfile='my_keyfile', + port=5672, transport='ssl', + ) + + def test_transport_establish_conn_sets_client_on_connection_object(self): + self.transport.establish_connection() + self.assertIs(self.mock_conn.return_value.client, self.client) + + def test_transport_establish_conn_creates_session_on_transport(self): + self.transport.establish_connection() + qpid_conn = self.mock_conn.return_value.get_qpid_connection + new_mock_session = qpid_conn.return_value.session.return_value + self.assertIs(self.transport.session, new_mock_session) + + def test_transport_establish_conn_returns_new_connection_object(self): + new_conn = self.transport.establish_connection() + self.assertIs(new_conn, self.mock_conn.return_value) + + def test_transport_establish_conn_uses_hostname_if_not_default(self): + self.client.hostname = 'some_other_hostname' + self.transport.establish_connection() + self.mock_conn.assert_called_once_with( + sasl_mechanisms='ANONYMOUS', + host='some_other_hostname', + timeout=4, + port=5672, + transport='tcp', + ) + + def test_transport_sets_qpid_message_ready_handler(self): + self.transport.establish_connection() + qpid_conn_call = self.mock_conn.return_value.get_qpid_connection + mock_session = qpid_conn_call.return_value.session.return_value + mock_set_callback = mock_session.set_message_received_notify_handler + expected_msg_callback = self.transport._qpid_message_ready_handler + mock_set_callback.assert_called_once_with(expected_msg_callback) + + def test_transport_sets_session_exception_handler(self): + self.transport.establish_connection() + qpid_conn_call = self.mock_conn.return_value.get_qpid_connection + mock_session = qpid_conn_call.return_value.session.return_value + mock_set_callback = mock_session.set_async_exception_notify_handler + exc_callback = self.transport._qpid_async_exception_notify_handler + mock_set_callback.assert_called_once_with(exc_callback) + + def test_transport_sets_connection_exception_handler(self): + self.transport.establish_connection() + qpid_conn_call = self.mock_conn.return_value.get_qpid_connection + qpid_conn = qpid_conn_call.return_value + mock_set_callback = qpid_conn.set_async_exception_notify_handler + exc_callback = self.transport._qpid_async_exception_notify_handler + mock_set_callback.assert_called_once_with(exc_callback) + + +@case_no_python3 +@case_no_pypy +class TestTransportClassAttributes(Case): + + def test_verify_Connection_attribute(self): + self.assertIs(Connection, Transport.Connection) + + def test_verify_polling_disabled(self): + self.assertIsNone(Transport.polling_interval) + + def test_transport_verify_supports_asynchronous_events(self): + self.assertTrue(Transport.supports_ev) + + def test_verify_driver_type_and_name(self): + 
self.assertEqual('qpid', Transport.driver_type) + self.assertEqual('qpid', Transport.driver_name) + + def test_transport_verify_recoverable_connection_errors(self): + connection_errors = Transport.recoverable_connection_errors + self.assertIn(ConnectionError, connection_errors) + self.assertIn(select.error, connection_errors) + + def test_transport_verify_recoverable_channel_errors(self): + channel_errors = Transport.recoverable_channel_errors + self.assertIn(NotFound, channel_errors) + + def test_transport_verify_pre_kombu_3_0_exception_labels(self): + self.assertEqual(Transport.recoverable_channel_errors, + Transport.channel_errors) + self.assertEqual(Transport.recoverable_connection_errors, + Transport.connection_errors) + + +@case_no_python3 +@case_no_pypy +@disable_runtime_dependency_check +class TestTransportRegisterWithEventLoop(Case): + + def test_transport_register_with_event_loop_calls_add_reader(self): + transport = Transport(Mock()) + mock_connection = Mock() + mock_loop = Mock() + transport.register_with_event_loop(mock_connection, mock_loop) + mock_loop.add_reader.assert_called_with( + transport.r, transport.on_readable, mock_connection, mock_loop, + ) + + +@case_no_python3 +@case_no_pypy +@disable_runtime_dependency_check +class TestTransportQpidCallbackHandlersAsync(Case): + + def setUp(self): + self.patch_a = patch(QPID_MODULE + '.os.write') + self.mock_os_write = self.patch_a.start() + self.transport = Transport(Mock()) + self.transport.register_with_event_loop(Mock(), Mock()) + + def tearDown(self): + self.patch_a.stop() + + def test__qpid_message_ready_handler_writes_symbol_to_fd(self): + self.transport._qpid_message_ready_handler(Mock()) + self.mock_os_write.assert_called_once_with(self.transport._w, '0') + + def test__qpid_async_exception_notify_handler_writes_symbol_to_fd(self): + self.transport._qpid_async_exception_notify_handler(Mock(), Mock()) + self.mock_os_write.assert_called_once_with(self.transport._w, 'e') + + +@case_no_python3 +@case_no_pypy +@disable_runtime_dependency_check +class TestTransportQpidCallbackHandlersSync(Case): + + def setUp(self): + self.patch_a = patch(QPID_MODULE + '.os.write') + self.mock_os_write = self.patch_a.start() + self.transport = Transport(Mock()) + + def tearDown(self): + self.patch_a.stop() + + def test__qpid_message_ready_handler_dows_not_write(self): + self.transport._qpid_message_ready_handler(Mock()) + self.assertTrue(not self.mock_os_write.called) + + def test__qpid_async_exception_notify_handler_does_not_write(self): + self.transport._qpid_async_exception_notify_handler(Mock(), Mock()) + self.assertTrue(not self.mock_os_write.called) + + +@case_no_python3 +@case_no_pypy +@disable_runtime_dependency_check +class TestTransportOnReadable(Case): + + def setUp(self): + self.patch_a = patch(QPID_MODULE + '.os.read') + self.mock_os_read = self.patch_a.start() + + self.patch_b = patch.object(Transport, 'drain_events') + self.mock_drain_events = self.patch_b.start() + self.transport = Transport(Mock()) + self.transport.register_with_event_loop(Mock(), Mock()) + + def tearDown(self): + self.patch_a.stop() + self.patch_b.stop() + + def test_transport_on_readable_reads_symbol_from_fd(self): + self.transport.on_readable(Mock(), Mock()) + self.mock_os_read.assert_called_once_with(self.transport.r, 1) + + def test_transport_on_readable_calls_drain_events(self): + mock_connection = Mock() + self.transport.on_readable(mock_connection, Mock()) + self.mock_drain_events.assert_called_with(mock_connection) + + def 
test_transport_on_readable_catches_socket_timeout(self): + self.mock_drain_events.side_effect = socket.timeout() + self.transport.on_readable(Mock(), Mock()) + + def test_transport_on_readable_ignores_non_socket_timeout_exception(self): + self.mock_drain_events.side_effect = IOError() + with self.assertRaises(IOError): + self.transport.on_readable(Mock(), Mock()) + + +@case_no_python3 +@case_no_pypy +@disable_runtime_dependency_check +class TestTransportVerifyRuntimeEnvironment(Case): + + def setUp(self): + self.verify_runtime_environment = Transport.verify_runtime_environment + self.patch_a = patch.object(Transport, 'verify_runtime_environment') + self.patch_a.start() + self.transport = Transport(Mock()) + + def tearDown(self): + self.patch_a.stop() + + @patch(QPID_MODULE + '.PY3', new=True) + def test_raises_exception_for_Python3(self): + with self.assertRaises(RuntimeError): + self.verify_runtime_environment(self.transport) + + @patch('__builtin__.getattr') + def test_raises_exc_for_PyPy(self, mock_getattr): + mock_getattr.return_value = True + with self.assertRaises(RuntimeError): + self.verify_runtime_environment(self.transport) + + @patch(QPID_MODULE + '.dependency_is_none') + def test_raises_exc_dep_missing(self, mock_dep_is_none): + mock_dep_is_none.return_value = True + with self.assertRaises(RuntimeError): + self.verify_runtime_environment(self.transport) + + @patch(QPID_MODULE + '.dependency_is_none') + def test_calls_dependency_is_none(self, mock_dep_is_none): + mock_dep_is_none.return_value = False + self.verify_runtime_environment(self.transport) + self.assertTrue(mock_dep_is_none.called) + + def test_raises_no_exception(self): + self.verify_runtime_environment(self.transport) + + +@case_no_python3 +@case_no_pypy +@disable_runtime_dependency_check +class TestTransport(ExtraAssertionsMixin, Case): + + def setUp(self): + """Creates a mock_client to be used in testing.""" + self.mock_client = Mock() + + def test_close_connection(self): + """Test that close_connection calls close on the connection.""" + my_transport = Transport(self.mock_client) + mock_connection = Mock() + my_transport.close_connection(mock_connection) + mock_connection.close.assert_called_once_with() + + def test_default_connection_params(self): + """Test that the default_connection_params are correct""" + correct_params = { + 'hostname': 'localhost', + 'port': 5672, + } + my_transport = Transport(self.mock_client) + result_params = my_transport.default_connection_params + self.assertDictEqual(correct_params, result_params) + + @patch(QPID_MODULE + '.os.close') + def test_del_sync(self, close): + my_transport = Transport(self.mock_client) + my_transport.__del__() + self.assertFalse(close.called) + + @patch(QPID_MODULE + '.os.close') + def test_del_async(self, close): + my_transport = Transport(self.mock_client) + my_transport.register_with_event_loop(Mock(), Mock()) + my_transport.__del__() + self.assertTrue(close.called) + + @patch(QPID_MODULE + '.os.close') + def test_del_async_failed(self, close): + close.side_effect = OSError() + my_transport = Transport(self.mock_client) + my_transport.register_with_event_loop(Mock(), Mock()) + my_transport.__del__() + self.assertTrue(close.called) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_redis.py b/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_redis.py new file mode 100644 index 0000000..5d3aab9 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/tests/transport/test_redis.py @@ -0,0 +1,1269 @@ +from 
__future__ import absolute_import + +import socket +import types + +from anyjson import dumps, loads +from collections import defaultdict +from contextlib import contextmanager +from itertools import count + +from kombu import Connection, Exchange, Queue, Consumer, Producer +from kombu.exceptions import InconsistencyError, VersionMismatch +from kombu.five import Empty, Queue as _Queue +from kombu.transport import virtual +from kombu.utils import eventio # patch poll + +from kombu.tests.case import ( + Case, ContextMock, Mock, call, module_exists, skip_if_not_module, patch, +) + + +class JSONEqual(object): + # The order in which a dict is serialized to json depends on the hashseed + # so we have this to support json in .assert_has_call*. + + def __init__(self, expected): + self.expected = expected + + def __eq__(self, other): + return loads(other) == loads(self.expected) + + def __str__(self): + return self.expected + + def __repr__(self): + return '(json)%r' % (self.expected,) + + +class _poll(eventio._select): + + def register(self, fd, flags): + if flags & eventio.READ: + self._rfd.add(fd) + + def poll(self, timeout): + events = [] + for fd in self._rfd: + if fd.data: + events.append((fd.fileno(), eventio.READ)) + return events + + +eventio.poll = _poll +# must import after poller patch +from kombu.transport import redis # noqa + + +class ResponseError(Exception): + pass + + +class Client(object): + queues = {} + sets = defaultdict(set) + hashes = defaultdict(dict) + shard_hint = None + + def __init__(self, db=None, port=None, connection_pool=None, **kwargs): + self._called = [] + self._connection = None + self.bgsave_raises_ResponseError = False + self.connection = self._sconnection(self) + + def bgsave(self): + self._called.append('BGSAVE') + if self.bgsave_raises_ResponseError: + raise ResponseError() + + def delete(self, key): + self.queues.pop(key, None) + + def exists(self, key): + return key in self.queues or key in self.sets + + def hset(self, key, k, v): + self.hashes[key][k] = v + + def hget(self, key, k): + return self.hashes[key].get(k) + + def hdel(self, key, k): + self.hashes[key].pop(k, None) + + def sadd(self, key, member, *args): + self.sets[key].add(member) + zadd = sadd + + def smembers(self, key): + return self.sets.get(key, set()) + + def srem(self, key, *args): + self.sets.pop(key, None) + zrem = srem + + def llen(self, key): + try: + return self.queues[key].qsize() + except KeyError: + return 0 + + def lpush(self, key, value): + self.queues[key].put_nowait(value) + + def parse_response(self, connection, type, **options): + cmd, queues = self.connection._sock.data.pop() + assert cmd == type + self.connection._sock.data = [] + if type == 'BRPOP': + item = self.brpop(queues, 0.001) + if item: + return item + raise Empty() + + def brpop(self, keys, timeout=None): + key = keys[0] + try: + item = self.queues[key].get(timeout=timeout) + except Empty: + pass + else: + return key, item + + def rpop(self, key): + try: + return self.queues[key].get_nowait() + except KeyError: + pass + + def __contains__(self, k): + return k in self._called + + def pipeline(self): + return Pipeline(self) + + def encode(self, value): + return str(value) + + def _new_queue(self, key): + self.queues[key] = _Queue() + + class _sconnection(object): + disconnected = False + + class _socket(object): + blocking = True + filenos = count(30) + + def __init__(self, *args): + self._fileno = next(self.filenos) + self.data = [] + + def fileno(self): + return self._fileno + + def setblocking(self, blocking): 
+ self.blocking = blocking + + def __init__(self, client): + self.client = client + self._sock = self._socket() + + def disconnect(self): + self.disconnected = True + + def send_command(self, cmd, *args): + self._sock.data.append((cmd, args)) + + def info(self): + return {'foo': 1} + + def pubsub(self, *args, **kwargs): + connection = self.connection + + class ConnectionPool(object): + + def get_connection(self, *args, **kwargs): + return connection + self.connection_pool = ConnectionPool() + + return self + + def __repr__(self): + return '= (3, 0): + raise SkipTest('not relevant on py3k') + + def test_str_to_bytes(self): + with clean_encoding() as e: + self.assertIsInstance(e.str_to_bytes('foobar'), bytes_t) + + def test_from_utf8(self): + with clean_encoding() as e: + self.assertIsInstance(e.from_utf8('foobar'), bytes_t) + + def test_default_encode(self): + with clean_encoding() as e: + self.assertTrue(e.default_encode(b'foo')) + + +class test_safe_str(Case): + + def setUp(self): + self._cencoding = patch('sys.getfilesystemencoding') + self._encoding = self._cencoding.__enter__() + self._encoding.return_value = 'ascii' + + def tearDown(self): + self._cencoding.__exit__() + + def test_when_bytes(self): + self.assertEqual(safe_str('foo'), 'foo') + + def test_when_unicode(self): + self.assertIsInstance(safe_str('foo'), string_t) + + def test_when_encoding_utf8(self): + with patch('sys.getfilesystemencoding') as encoding: + encoding.return_value = 'utf-8' + self.assertEqual(default_encoding(), 'utf-8') + s = 'The quiæk fåx jømps øver the lazy dåg' + res = safe_str(s) + self.assertIsInstance(res, str) + + def test_when_containing_high_chars(self): + with patch('sys.getfilesystemencoding') as encoding: + encoding.return_value = 'ascii' + s = 'The quiæk fåx jømps øver the lazy dåg' + res = safe_str(s) + self.assertIsInstance(res, str) + self.assertEqual(len(s), len(res)) + + def test_when_not_string(self): + o = object() + self.assertEqual(safe_str(o), repr(o)) + + def test_when_unrepresentable(self): + + class O(object): + + def __repr__(self): + raise KeyError('foo') + + self.assertIn('= (3, 0): + from io import StringIO, BytesIO +else: + from StringIO import StringIO, StringIO as BytesIO # noqa + + +class OldString(object): + + def __init__(self, value): + self.value = value + + def __str__(self): + return self.value + + def split(self, *args, **kwargs): + return self.value.split(*args, **kwargs) + + def rsplit(self, *args, **kwargs): + return self.value.rsplit(*args, **kwargs) + + +class test_kombu_module(Case): + + def test_dir(self): + import kombu + self.assertTrue(dir(kombu)) + + +class test_utils(Case): + + def test_maybe_list(self): + self.assertEqual(utils.maybe_list(None), []) + self.assertEqual(utils.maybe_list(1), [1]) + self.assertEqual(utils.maybe_list([1, 2, 3]), [1, 2, 3]) + + def test_fxrange_no_repeatlast(self): + self.assertEqual(list(utils.fxrange(1.0, 3.0, 1.0)), + [1.0, 2.0, 3.0]) + + def test_fxrangemax(self): + self.assertEqual(list(utils.fxrangemax(1.0, 3.0, 1.0, 30.0)), + [1.0, 2.0, 3.0, 3.0, 3.0, 3.0, + 3.0, 3.0, 3.0, 3.0, 3.0]) + self.assertEqual(list(utils.fxrangemax(1.0, None, 1.0, 30.0)), + [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]) + + def test_reprkwargs(self): + self.assertTrue(utils.reprkwargs({'foo': 'bar', 1: 2, 'k': 'v'})) + + def test_reprcall(self): + self.assertTrue( + utils.reprcall('add', (2, 2), {'copy': True}), + ) + + +class test_UUID(Case): + + def test_uuid4(self): + self.assertNotEqual(utils.uuid4(), + utils.uuid4()) + + def test_uuid(self): + i1 = 
utils.uuid() + i2 = utils.uuid() + self.assertIsInstance(i1, str) + self.assertNotEqual(i1, i2) + + @skip_if_module('__pypy__') + def test_uuid_without_ctypes(self): + old_utils = sys.modules.pop('kombu.utils') + + @mask_modules('ctypes') + def with_ctypes_masked(): + from kombu.utils import ctypes, uuid + + self.assertIsNone(ctypes) + tid = uuid() + self.assertTrue(tid) + self.assertIsInstance(tid, string_t) + + try: + with_ctypes_masked() + finally: + sys.modules['celery.utils'] = old_utils + + +class test_Misc(Case): + + def test_kwdict(self): + + def f(**kwargs): + return kwargs + + kw = {'foo': 'foo', + 'bar': 'bar'} + self.assertTrue(f(**utils.kwdict(kw))) + + +class MyStringIO(StringIO): + + def close(self): + pass + + +class MyBytesIO(BytesIO): + + def close(self): + pass + + +class test_emergency_dump_state(Case): + + @redirect_stdouts + def test_dump(self, stdout, stderr): + fh = MyBytesIO() + + utils.emergency_dump_state({'foo': 'bar'}, open_file=lambda n, m: fh) + self.assertDictEqual(pickle.loads(fh.getvalue()), {'foo': 'bar'}) + self.assertTrue(stderr.getvalue()) + self.assertFalse(stdout.getvalue()) + + @redirect_stdouts + def test_dump_second_strategy(self, stdout, stderr): + fh = MyStringIO() + + def raise_something(*args, **kwargs): + raise KeyError('foo') + + utils.emergency_dump_state( + {'foo': 'bar'}, + open_file=lambda n, m: fh, dump=raise_something + ) + self.assertIn('foo', fh.getvalue()) + self.assertIn('bar', fh.getvalue()) + self.assertTrue(stderr.getvalue()) + self.assertFalse(stdout.getvalue()) + + +def insomnia(fun): + + @wraps(fun) + def _inner(*args, **kwargs): + def mysleep(i): + pass + + prev_sleep = utils.sleep + utils.sleep = mysleep + try: + return fun(*args, **kwargs) + finally: + utils.sleep = prev_sleep + + return _inner + + +class test_retry_over_time(Case): + + def setUp(self): + self.index = 0 + + class Predicate(Exception): + pass + + def myfun(self): + if self.index < 9: + raise self.Predicate() + return 42 + + def errback(self, exc, intervals, retries): + interval = next(intervals) + sleepvals = (None, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 16.0) + self.index += 1 + self.assertEqual(interval, sleepvals[self.index]) + return interval + + @insomnia + def test_simple(self): + prev_count, utils.count = utils.count, Mock() + try: + utils.count.return_value = list(range(1)) + x = utils.retry_over_time(self.myfun, self.Predicate, + errback=None, interval_max=14) + self.assertIsNone(x) + utils.count.return_value = list(range(10)) + cb = Mock() + x = utils.retry_over_time(self.myfun, self.Predicate, + errback=self.errback, callback=cb, + interval_max=14) + self.assertEqual(x, 42) + self.assertEqual(self.index, 9) + cb.assert_called_with() + finally: + utils.count = prev_count + + @insomnia + def test_retry_once(self): + with self.assertRaises(self.Predicate): + utils.retry_over_time( + self.myfun, self.Predicate, + max_retries=1, errback=self.errback, interval_max=14, + ) + self.assertEqual(self.index, 1) + # no errback + with self.assertRaises(self.Predicate): + utils.retry_over_time( + self.myfun, self.Predicate, + max_retries=1, errback=None, interval_max=14, + ) + + @insomnia + def test_retry_always(self): + Predicate = self.Predicate + + class Fun(object): + + def __init__(self): + self.calls = 0 + + def __call__(self, *args, **kwargs): + try: + if self.calls >= 10: + return 42 + raise Predicate() + finally: + self.calls += 1 + fun = Fun() + + self.assertEqual( + utils.retry_over_time( + fun, self.Predicate, + max_retries=0, errback=None, 
interval_max=14, + ), + 42, + ) + self.assertEqual(fun.calls, 11) + + +class test_cached_property(Case): + + def test_deleting(self): + + class X(object): + xx = False + + @utils.cached_property + def foo(self): + return 42 + + @foo.deleter # noqa + def foo(self, value): + self.xx = value + + x = X() + del(x.foo) + self.assertFalse(x.xx) + x.__dict__['foo'] = 'here' + del(x.foo) + self.assertEqual(x.xx, 'here') + + def test_when_access_from_class(self): + + class X(object): + xx = None + + @utils.cached_property + def foo(self): + return 42 + + @foo.setter # noqa + def foo(self, value): + self.xx = 10 + + desc = X.__dict__['foo'] + self.assertIs(X.foo, desc) + + self.assertIs(desc.__get__(None), desc) + self.assertIs(desc.__set__(None, 1), desc) + self.assertIs(desc.__delete__(None), desc) + self.assertTrue(desc.setter(1)) + + x = X() + x.foo = 30 + self.assertEqual(x.xx, 10) + + del(x.foo) + + +class test_symbol_by_name(Case): + + def test_instance_returns_instance(self): + instance = object() + self.assertIs(utils.symbol_by_name(instance), instance) + + def test_returns_default(self): + default = object() + self.assertIs( + utils.symbol_by_name('xyz.ryx.qedoa.weq:foz', default=default), + default, + ) + + def test_no_default(self): + with self.assertRaises(ImportError): + utils.symbol_by_name('xyz.ryx.qedoa.weq:foz') + + def test_imp_reraises_ValueError(self): + imp = Mock() + imp.side_effect = ValueError() + with self.assertRaises(ValueError): + utils.symbol_by_name('kombu.Connection', imp=imp) + + def test_package(self): + from kombu.entity import Exchange + self.assertIs( + utils.symbol_by_name('.entity:Exchange', package='kombu'), + Exchange, + ) + self.assertTrue(utils.symbol_by_name(':Consumer', package='kombu')) + + +class test_ChannelPromise(Case): + + def test_repr(self): + obj = Mock(name='cb') + self.assertIn( + 'promise', + repr(utils.ChannelPromise(obj)), + ) + self.assertFalse(obj.called) + + +class test_entrypoints(Case): + + @mask_modules('pkg_resources') + def test_without_pkg_resources(self): + self.assertListEqual(list(utils.entrypoints('kombu.test')), []) + + @module_exists('pkg_resources') + def test_with_pkg_resources(self): + with patch('pkg_resources.iter_entry_points', create=True) as iterep: + eps = iterep.return_value = [Mock(), Mock()] + + self.assertTrue(list(utils.entrypoints('kombu.test'))) + iterep.assert_called_with('kombu.test') + eps[0].load.assert_called_with() + eps[1].load.assert_called_with() + + +class test_shufflecycle(Case): + + def test_shuffles(self): + prev_repeat, utils.repeat = utils.repeat, Mock() + try: + utils.repeat.return_value = list(range(10)) + values = set(['A', 'B', 'C']) + cycle = utils.shufflecycle(values) + seen = set() + for i in range(10): + next(cycle) + utils.repeat.assert_called_with(None) + self.assertTrue(seen.issubset(values)) + with self.assertRaises(StopIteration): + next(cycle) + next(cycle) + finally: + utils.repeat = prev_repeat + + +class test_version_string_as_tuple(Case): + + def test_versions(self): + self.assertTupleEqual( + version_string_as_tuple('3'), + version_info_t(3, 0, 0, '', ''), + ) + self.assertTupleEqual( + version_string_as_tuple('3.3'), + version_info_t(3, 3, 0, '', ''), + ) + self.assertTupleEqual( + version_string_as_tuple('3.3.1'), + version_info_t(3, 3, 1, '', ''), + ) + self.assertTupleEqual( + version_string_as_tuple('3.3.1a3'), + version_info_t(3, 3, 1, 'a3', ''), + ) + self.assertTupleEqual( + version_string_as_tuple('3.3.1a3-40c32'), + version_info_t(3, 3, 1, 'a3', '40c32'), + ) + 
self.assertEqual( + version_string_as_tuple('3.3.1.a3.40c32'), + version_info_t(3, 3, 1, 'a3', '40c32'), + ) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/SLMQ.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/SLMQ.py new file mode 100644 index 0000000..449bc2f --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/SLMQ.py @@ -0,0 +1,186 @@ +""" +kombu.transport.SLMQ +==================== + +SoftLayer Message Queue transport. + +""" +from __future__ import absolute_import + +import socket +import string + +from anyjson import loads, dumps + +import os + +from kombu.five import Empty, text_t +from kombu.utils import cached_property # , uuid +from kombu.utils.encoding import bytes_to_str, safe_str + +from . import virtual + +try: + from softlayer_messaging import get_client + from softlayer_messaging.errors import ResponseError +except ImportError: # pragma: no cover + get_client = ResponseError = None # noqa + +# dots are replaced by dash, all other punctuation replaced by underscore. +CHARS_REPLACE_TABLE = dict( + (ord(c), 0x5f) for c in string.punctuation if c not in '_') + + +class Channel(virtual.Channel): + default_visibility_timeout = 1800 # 30 minutes. + domain_format = 'kombu%(vhost)s' + _slmq = None + _queue_cache = {} + _noack_queues = set() + + def __init__(self, *args, **kwargs): + if get_client is None: + raise ImportError( + 'SLMQ transport requires the softlayer_messaging library', + ) + super(Channel, self).__init__(*args, **kwargs) + queues = self.slmq.queues() + for queue in queues: + self._queue_cache[queue] = queue + + def basic_consume(self, queue, no_ack, *args, **kwargs): + if no_ack: + self._noack_queues.add(queue) + return super(Channel, self).basic_consume(queue, no_ack, + *args, **kwargs) + + def basic_cancel(self, consumer_tag): + if consumer_tag in self._consumers: + queue = self._tag_to_queue[consumer_tag] + self._noack_queues.discard(queue) + return super(Channel, self).basic_cancel(consumer_tag) + + def entity_name(self, name, table=CHARS_REPLACE_TABLE): + """Format AMQP queue name into a valid SLQS queue name.""" + return text_t(safe_str(name)).translate(table) + + def _new_queue(self, queue, **kwargs): + """Ensures a queue exists in SLQS.""" + queue = self.entity_name(self.queue_name_prefix + queue) + try: + return self._queue_cache[queue] + except KeyError: + try: + self.slmq.create_queue( + queue, visibility_timeout=self.visibility_timeout) + except ResponseError: + pass + q = self._queue_cache[queue] = self.slmq.queue(queue) + return q + + def _delete(self, queue, *args): + """delete queue by name.""" + queue_name = self.entity_name(queue) + self._queue_cache.pop(queue_name, None) + self.slmq.queue(queue_name).delete(force=True) + super(Channel, self)._delete(queue_name) + + def _put(self, queue, message, **kwargs): + """Put message onto queue.""" + q = self._new_queue(queue) + q.push(dumps(message)) + + def _get(self, queue): + """Try to retrieve a single message off ``queue``.""" + q = self._new_queue(queue) + rs = q.pop(1) + if rs['items']: + m = rs['items'][0] + payload = loads(bytes_to_str(m['body'])) + if queue in self._noack_queues: + q.message(m['id']).delete() + else: + payload['properties']['delivery_info'].update({ + 'slmq_message_id': m['id'], 'slmq_queue_name': q.name}) + return payload + raise Empty() + + def basic_ack(self, delivery_tag): + delivery_info = self.qos.get(delivery_tag).delivery_info + try: + queue = delivery_info['slmq_queue_name'] + except KeyError: + pass + else: + 
self.delete_message(queue, delivery_info['slmq_message_id']) + super(Channel, self).basic_ack(delivery_tag) + + def _size(self, queue): + """Return the number of messages in a queue.""" + return self._new_queue(queue).detail()['message_count'] + + def _purge(self, queue): + """Delete all current messages in a queue.""" + q = self._new_queue(queue) + n = 0 + l = q.pop(10) + while l['items']: + for m in l['items']: + self.delete_message(queue, m['id']) + n += 1 + l = q.pop(10) + return n + + def delete_message(self, queue, message_id): + q = self.slmq.queue(self.entity_name(queue)) + return q.message(message_id).delete() + + @property + def slmq(self): + if self._slmq is None: + conninfo = self.conninfo + account = os.environ.get('SLMQ_ACCOUNT', conninfo.virtual_host) + user = os.environ.get('SL_USERNAME', conninfo.userid) + api_key = os.environ.get('SL_API_KEY', conninfo.password) + host = os.environ.get('SLMQ_HOST', conninfo.hostname) + port = os.environ.get('SLMQ_PORT', conninfo.port) + secure = bool(os.environ.get( + 'SLMQ_SECURE', self.transport_options.get('secure')) or True, + ) + endpoint = '{0}://{1}{2}'.format( + 'https' if secure else 'http', host, + ':{0}'.format(port) if port else '', + ) + + self._slmq = get_client(account, endpoint=endpoint) + self._slmq.authenticate(user, api_key) + return self._slmq + + @property + def conninfo(self): + return self.connection.client + + @property + def transport_options(self): + return self.connection.client.transport_options + + @cached_property + def visibility_timeout(self): + return (self.transport_options.get('visibility_timeout') or + self.default_visibility_timeout) + + @cached_property + def queue_name_prefix(self): + return self.transport_options.get('queue_name_prefix', '') + + +class Transport(virtual.Transport): + Channel = Channel + + polling_interval = 1 + default_port = None + connection_errors = ( + virtual.Transport.connection_errors + ( + ResponseError, socket.error + ) + ) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/SQS.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/SQS.py new file mode 100644 index 0000000..68cb053 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/SQS.py @@ -0,0 +1,539 @@ +""" +kombu.transport.SQS +=================== + +Amazon SQS transport module for Kombu. This package implements an AMQP-like +interface on top of Amazons SQS service, with the goal of being optimized for +high performance and reliability. + +The default settings for this module are focused now on high performance in +task queue situations where tasks are small, idempotent and run very fast. + +SQS Features supported by this transport: + Long Polling: + http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ + sqs-long-polling.html + + Long polling is enabled by setting the `wait_time_seconds` transport + option to a number > 1. Amazon supports up to 20 seconds. This is + disabled for now, but will be enabled by default in the near future. + + Batch API Actions: + http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ + sqs-batch-api.html + + The default behavior of the SQS Channel.drain_events() method is to + request up to the 'prefetch_count' messages on every request to SQS. + These messages are stored locally in a deque object and passed back + to the Transport until the deque is empty, before triggering a new + API call to Amazon. 
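    A minimal configuration sketch (editor's illustration, not part of the upstream
    docstring; the broker URL, credentials and option values below are placeholders)
    showing how the transport options referenced here are typically passed to a
    kombu Connection:

        from kombu import Connection

        connection = Connection(
            'sqs://AWS_ACCESS_KEY:AWS_SECRET_KEY@',
            transport_options={
                'region': 'us-east-1',           # Channel.region (defaults to us-east-1)
                'visibility_timeout': 1800,      # seconds, see default_visibility_timeout
                'wait_time_seconds': 10,         # > 1 enables SQS long polling (boto >= 2.8)
                'queue_name_prefix': 'celery-',  # prepended before entity_name() mangling
                'sdb_persistence': False,        # True stores fanout routes in SimpleDB
            },
        )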
+ + This behavior dramatically speeds up the rate that you can pull tasks + from SQS when you have short-running tasks (or a large number of workers). + + When a Celery worker has multiple queues to monitor, it will pull down + up to 'prefetch_count' messages from queueA and work on them all before + moving on to queueB. If queueB is empty, it will wait up until + 'polling_interval' expires before moving back and checking on queueA. +""" + +from __future__ import absolute_import + +import collections +import socket +import string + +from anyjson import loads, dumps + +import boto +from boto import exception +from boto import sdb as _sdb +from boto import sqs as _sqs +from boto.sdb.domain import Domain +from boto.sdb.connection import SDBConnection +from boto.sqs.connection import SQSConnection +from boto.sqs.message import Message + +from kombu.five import Empty, range, text_t +from kombu.log import get_logger +from kombu.utils import cached_property, uuid +from kombu.utils.encoding import bytes_to_str, safe_str +from kombu.transport.virtual import scheduling + +from . import virtual + +logger = get_logger(__name__) + +# dots are replaced by dash, all other punctuation +# replaced by underscore. +CHARS_REPLACE_TABLE = dict((ord(c), 0x5f) + for c in string.punctuation if c not in '-_.') +CHARS_REPLACE_TABLE[0x2e] = 0x2d # '.' -> '-' + + +def maybe_int(x): + try: + return int(x) + except ValueError: + return x +BOTO_VERSION = tuple(maybe_int(part) for part in boto.__version__.split('.')) +W_LONG_POLLING = BOTO_VERSION >= (2, 8) + +#: SQS bulk get supports a maximum of 10 messages at a time. +SQS_MAX_MESSAGES = 10 + + +class Table(Domain): + """Amazon SimpleDB domain describing the message routing table.""" + # caches queues already bound, so we don't have to declare them again. + _already_bound = set() + + def routes_for(self, exchange): + """Iterator giving all routes for an exchange.""" + return self.select("""WHERE exchange = '%s'""" % exchange) + + def get_queue(self, queue): + """Get binding for queue.""" + qid = self._get_queue_id(queue) + if qid: + return self.get_item(qid) + + def create_binding(self, queue): + """Get binding item for queue. + + Creates the item if it doesn't exist. + + """ + item = self.get_queue(queue) + if item: + return item, item['id'] + id = uuid() + return self.new_item(id), id + + def queue_bind(self, exchange, routing_key, pattern, queue): + if queue not in self._already_bound: + binding, id = self.create_binding(queue) + binding.update(exchange=exchange, + routing_key=routing_key or '', + pattern=pattern or '', + queue=queue or '', + id=id) + binding.save() + self._already_bound.add(queue) + + def queue_delete(self, queue): + """delete queue by name.""" + self._already_bound.discard(queue) + item = self._get_queue_item(queue) + if item: + self.delete_item(item) + + def exchange_delete(self, exchange): + """Delete all routes for `exchange`.""" + for item in self.routes_for(exchange): + self.delete_item(item['id']) + + def get_item(self, item_name): + """Uses `consistent_read` by default.""" + # Domain is an old-style class, can't use super(). 
+ for consistent_read in (False, True): + item = Domain.get_item(self, item_name, consistent_read) + if item: + return item + + def select(self, query='', next_token=None, + consistent_read=True, max_items=None): + """Uses `consistent_read` by default.""" + query = """SELECT * FROM `%s` %s""" % (self.name, query) + return Domain.select(self, query, next_token, + consistent_read, max_items) + + def _try_first(self, query='', **kwargs): + for c in (False, True): + for item in self.select(query, consistent_read=c, **kwargs): + return item + + def get_exchanges(self): + return list(set(i['exchange'] for i in self.select())) + + def _get_queue_item(self, queue): + return self._try_first("""WHERE queue = '%s' limit 1""" % queue) + + def _get_queue_id(self, queue): + item = self._get_queue_item(queue) + if item: + return item['id'] + + +class Channel(virtual.Channel): + Table = Table + + default_region = 'us-east-1' + default_visibility_timeout = 1800 # 30 minutes. + default_wait_time_seconds = 0 # disabled see #198 + domain_format = 'kombu%(vhost)s' + _sdb = None + _sqs = None + _queue_cache = {} + _noack_queues = set() + + def __init__(self, *args, **kwargs): + super(Channel, self).__init__(*args, **kwargs) + + # SQS blows up when you try to create a new queue if one already + # exists with a different visibility_timeout, so this prepopulates + # the queue_cache to protect us from recreating + # queues that are known to already exist. + queues = self.sqs.get_all_queues(prefix=self.queue_name_prefix) + for queue in queues: + self._queue_cache[queue.name] = queue + self._fanout_queues = set() + + # The drain_events() method stores extra messages in a local + # Deque object. This allows multiple messages to be requested from + # SQS at once for performance, but maintains the same external API + # to the caller of the drain_events() method. + self._queue_message_cache = collections.deque() + + def basic_consume(self, queue, no_ack, *args, **kwargs): + if no_ack: + self._noack_queues.add(queue) + return super(Channel, self).basic_consume( + queue, no_ack, *args, **kwargs + ) + + def basic_cancel(self, consumer_tag): + if consumer_tag in self._consumers: + queue = self._tag_to_queue[consumer_tag] + self._noack_queues.discard(queue) + return super(Channel, self).basic_cancel(consumer_tag) + + def drain_events(self, timeout=None): + """Return a single payload message from one of our queues. + + :raises Empty: if no messages available. + + """ + # If we're not allowed to consume or have no consumers, raise Empty + if not self._consumers or not self.qos.can_consume(): + raise Empty() + message_cache = self._queue_message_cache + + # Check if there are any items in our buffer. If there are any, pop + # off that queue first. + try: + return message_cache.popleft() + except IndexError: + pass + + # At this point, go and get more messages from SQS + res, queue = self._poll(self.cycle, timeout=timeout) + message_cache.extend((r, queue) for r in res) + + # Now try to pop off the queue again. + try: + return message_cache.popleft() + except IndexError: + raise Empty() + + def _reset_cycle(self): + """Reset the consume cycle. + + :returns: a FairCycle object that points to our _get_bulk() method + rather than the standard _get() method. This allows for multiple + messages to be returned at once from SQS (based on the prefetch + limit). 
+ + """ + self._cycle = scheduling.FairCycle( + self._get_bulk, self._active_queues, Empty, + ) + + def entity_name(self, name, table=CHARS_REPLACE_TABLE): + """Format AMQP queue name into a legal SQS queue name.""" + return text_t(safe_str(name)).translate(table) + + def _new_queue(self, queue, **kwargs): + """Ensure a queue with given name exists in SQS.""" + # Translate to SQS name for consistency with initial + # _queue_cache population. + queue = self.entity_name(self.queue_name_prefix + queue) + try: + return self._queue_cache[queue] + except KeyError: + q = self._queue_cache[queue] = self.sqs.create_queue( + queue, self.visibility_timeout, + ) + return q + + def queue_bind(self, queue, exchange=None, routing_key='', + arguments=None, **kwargs): + super(Channel, self).queue_bind(queue, exchange, routing_key, + arguments, **kwargs) + if self.typeof(exchange).type == 'fanout': + self._fanout_queues.add(queue) + + def _queue_bind(self, *args): + """Bind ``queue`` to ``exchange`` with routing key. + + Route will be stored in SDB if so enabled. + + """ + if self.supports_fanout: + self.table.queue_bind(*args) + + def get_table(self, exchange): + """Get routing table. + + Retrieved from SDB if :attr:`supports_fanout`. + + """ + if self.supports_fanout: + return [(r['routing_key'], r['pattern'], r['queue']) + for r in self.table.routes_for(exchange)] + return super(Channel, self).get_table(exchange) + + def get_exchanges(self): + if self.supports_fanout: + return self.table.get_exchanges() + return super(Channel, self).get_exchanges() + + def _delete(self, queue, *args): + """delete queue by name.""" + if self.supports_fanout: + self.table.queue_delete(queue) + super(Channel, self)._delete(queue) + self._queue_cache.pop(queue, None) + + def exchange_delete(self, exchange, **kwargs): + """Delete exchange by name.""" + if self.supports_fanout: + self.table.exchange_delete(exchange) + super(Channel, self).exchange_delete(exchange, **kwargs) + + def _has_queue(self, queue, **kwargs): + """Return True if ``queue`` was previously declared.""" + if self.supports_fanout: + return bool(self.table.get_queue(queue)) + return super(Channel, self)._has_queue(queue) + + def _put(self, queue, message, **kwargs): + """Put message onto queue.""" + q = self._new_queue(queue) + m = Message() + m.set_body(dumps(message)) + q.write(m) + + def _put_fanout(self, exchange, message, routing_key, **kwargs): + """Deliver fanout message to all queues in ``exchange``.""" + for route in self.table.routes_for(exchange): + self._put(route['queue'], message, **kwargs) + + def _get_from_sqs(self, queue, count=1): + """Retrieve messages from SQS and returns the raw SQS message objects. + + :returns: List of SQS message objects + + """ + q = self._new_queue(queue) + if W_LONG_POLLING and queue not in self._fanout_queues: + return q.get_messages( + count, wait_time_seconds=self.wait_time_seconds, + ) + else: # boto < 2.8 + return q.get_messages(count) + + def _message_to_python(self, message, queue_name, queue): + payload = loads(bytes_to_str(message.get_body())) + if queue_name in self._noack_queues: + queue.delete_message(message) + else: + payload['properties']['delivery_info'].update({ + 'sqs_message': message, 'sqs_queue': queue, + }) + return payload + + def _messages_to_python(self, messages, queue): + """Convert a list of SQS Message objects into Payloads. + + This method handles converting SQS Message objects into + Payloads, and appropriately updating the queue depending on + the 'ack' settings for that queue. 
+ + :param messages: A list of SQS Message objects. + :param queue: String name representing the queue they came from + + :returns: A list of Payload objects + + """ + q = self._new_queue(queue) + return [self._message_to_python(m, queue, q) for m in messages] + + def _get_bulk(self, queue, max_if_unlimited=SQS_MAX_MESSAGES): + """Try to retrieve multiple messages off ``queue``. + + Where _get() returns a single Payload object, this method returns a + list of Payload objects. The number of objects returned is determined + by the total number of messages available in the queue and the + number of messages that the QoS object allows (based on the + prefetch_count). + + .. note:: + Ignores QoS limits so caller is responsible for checking + that we are allowed to consume at least one message from the + queue. get_bulk will then ask QoS for an estimate of + the number of extra messages that we can consume. + + args: + queue: The queue name (string) to pull from + + returns: + payloads: A list of payload objects returned + """ + # drain_events calls `can_consume` first, consuming + # a token, so we know that we are allowed to consume at least + # one message. + maxcount = self.qos.can_consume_max_estimate() + maxcount = max_if_unlimited if maxcount is None else max(maxcount, 1) + if maxcount: + messages = self._get_from_sqs( + queue, count=min(maxcount, SQS_MAX_MESSAGES), + ) + + if messages: + return self._messages_to_python(messages, queue) + raise Empty() + + def _get(self, queue): + """Try to retrieve a single message off ``queue``.""" + messages = self._get_from_sqs(queue, count=1) + + if messages: + return self._messages_to_python(messages, queue)[0] + raise Empty() + + def _restore(self, message, + unwanted_delivery_info=('sqs_message', 'sqs_queue')): + for unwanted_key in unwanted_delivery_info: + # Remove objects that aren't JSON serializable (Issue #1108). + message.delivery_info.pop(unwanted_key, None) + return super(Channel, self)._restore(message) + + def basic_ack(self, delivery_tag): + delivery_info = self.qos.get(delivery_tag).delivery_info + try: + queue = delivery_info['sqs_queue'] + except KeyError: + pass + else: + queue.delete_message(delivery_info['sqs_message']) + super(Channel, self).basic_ack(delivery_tag) + + def _size(self, queue): + """Return the number of messages in a queue.""" + return self._new_queue(queue).count() + + def _purge(self, queue): + """Delete all current messages in a queue.""" + q = self._new_queue(queue) + # SQS is slow at registering messages, so run for a few + # iterations to ensure messages are deleted. + size = 0 + for i in range(10): + size += q.count() + if not size: + break + q.clear() + return size + + def close(self): + super(Channel, self).close() + for conn in (self._sqs, self._sdb): + if conn: + try: + conn.close() + except AttributeError as exc: # FIXME ??? 
+ if "can't set attribute" not in str(exc): + raise + + def _get_regioninfo(self, regions): + if self.region: + for _r in regions: + if _r.name == self.region: + return _r + + def _aws_connect_to(self, fun, regions): + conninfo = self.conninfo + region = self._get_regioninfo(regions) + return fun(region=region, + aws_access_key_id=conninfo.userid, + aws_secret_access_key=conninfo.password, + port=conninfo.port) + + @property + def sqs(self): + if self._sqs is None: + self._sqs = self._aws_connect_to(SQSConnection, _sqs.regions()) + return self._sqs + + @property + def sdb(self): + if self._sdb is None: + self._sdb = self._aws_connect_to(SDBConnection, _sdb.regions()) + return self._sdb + + @property + def table(self): + name = self.entity_name( + self.domain_format % {'vhost': self.conninfo.virtual_host}) + d = self.sdb.get_object( + 'CreateDomain', {'DomainName': name}, self.Table) + d.name = name + return d + + @property + def conninfo(self): + return self.connection.client + + @property + def transport_options(self): + return self.connection.client.transport_options + + @cached_property + def visibility_timeout(self): + return (self.transport_options.get('visibility_timeout') or + self.default_visibility_timeout) + + @cached_property + def queue_name_prefix(self): + return self.transport_options.get('queue_name_prefix', '') + + @cached_property + def supports_fanout(self): + return self.transport_options.get('sdb_persistence', False) + + @cached_property + def region(self): + return self.transport_options.get('region') or self.default_region + + @cached_property + def wait_time_seconds(self): + return self.transport_options.get('wait_time_seconds', + self.default_wait_time_seconds) + + +class Transport(virtual.Transport): + Channel = Channel + + polling_interval = 1 + wait_time_seconds = 0 + default_port = None + connection_errors = ( + virtual.Transport.connection_errors + + (exception.SQSError, socket.error) + ) + channel_errors = ( + virtual.Transport.channel_errors + (exception.SQSDecodeError, ) + ) + driver_type = 'sqs' + driver_name = 'sqs' diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/__init__.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/__init__.py new file mode 100644 index 0000000..c1d6868 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/__init__.py @@ -0,0 +1,110 @@ +""" +kombu.transport +=============== + +Built-in transports. + +""" +from __future__ import absolute_import + +from kombu.five import string_t +from kombu.syn import _detect_environment +from kombu.utils import symbol_by_name + + +def supports_librabbitmq(): + if _detect_environment() == 'default': + try: + import librabbitmq # noqa + except ImportError: # pragma: no cover + pass + else: # pragma: no cover + return True + + +def _ghettoq(name, new, alias=None): + xxx = new # stupid enclosing + + def __inner(): + import warnings + _new = callable(xxx) and xxx() or xxx + gtransport = 'ghettoq.taproot.{0}'.format(name) + ktransport = 'kombu.transport.{0}.Transport'.format(_new) + this = alias or name + warnings.warn(""" + Ghettoq does not work with Kombu, but there is now a built-in version + of the {0} transport. 
+ + You should replace {1!r} with: {2!r} + """.format(name, gtransport, this)) + return ktransport + + return __inner + + +TRANSPORT_ALIASES = { + 'amqp': 'kombu.transport.pyamqp:Transport', + 'pyamqp': 'kombu.transport.pyamqp:Transport', + 'librabbitmq': 'kombu.transport.librabbitmq:Transport', + 'memory': 'kombu.transport.memory:Transport', + 'redis': 'kombu.transport.redis:Transport', + 'SQS': 'kombu.transport.SQS:Transport', + 'sqs': 'kombu.transport.SQS:Transport', + 'beanstalk': 'kombu.transport.beanstalk:Transport', + 'mongodb': 'kombu.transport.mongodb:Transport', + 'couchdb': 'kombu.transport.couchdb:Transport', + 'zookeeper': 'kombu.transport.zookeeper:Transport', + 'django': 'kombu.transport.django:Transport', + 'sqlalchemy': 'kombu.transport.sqlalchemy:Transport', + 'sqla': 'kombu.transport.sqlalchemy:Transport', + 'SLMQ': 'kombu.transport.SLMQ.Transport', + 'slmq': 'kombu.transport.SLMQ.Transport', + 'ghettoq.taproot.Redis': _ghettoq('Redis', 'redis', 'redis'), + 'ghettoq.taproot.Database': _ghettoq('Database', 'django', 'django'), + 'ghettoq.taproot.MongoDB': _ghettoq('MongoDB', 'mongodb'), + 'ghettoq.taproot.Beanstalk': _ghettoq('Beanstalk', 'beanstalk'), + 'ghettoq.taproot.CouchDB': _ghettoq('CouchDB', 'couchdb'), + 'filesystem': 'kombu.transport.filesystem:Transport', + 'zeromq': 'kombu.transport.zmq:Transport', + 'zmq': 'kombu.transport.zmq:Transport', + 'amqplib': 'kombu.transport.amqplib:Transport', + 'qpid': 'kombu.transport.qpid:Transport', +} + +_transport_cache = {} + + +def resolve_transport(transport=None): + if isinstance(transport, string_t): + try: + transport = TRANSPORT_ALIASES[transport] + except KeyError: + if '.' not in transport and ':' not in transport: + from kombu.utils.text import fmatch_best + alt = fmatch_best(transport, TRANSPORT_ALIASES) + if alt: + raise KeyError( + 'No such transport: {0}. Did you mean {1}?'.format( + transport, alt)) + raise KeyError('No such transport: {0}'.format(transport)) + else: + if callable(transport): + transport = transport() + return symbol_by_name(transport) + return transport + + +def get_transport_cls(transport=None): + """Get transport class by name. + + The transport string is the full path to a transport class, e.g.:: + + "kombu.transport.pyamqp:Transport" + + If the name does not include `"."` (is not fully qualified), + the alias table will be consulted. + + """ + if transport not in _transport_cache: + _transport_cache[transport] = resolve_transport(transport) + return _transport_cache[transport] diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/amqplib.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/amqplib.py new file mode 100644 index 0000000..9eb51df --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/amqplib.py @@ -0,0 +1,401 @@ +""" +kombu.transport.amqplib +======================= + +amqplib transport. + +""" +from __future__ import absolute_import + +import errno +import socket + +from kombu.five import items +from kombu.utils.encoding import str_to_bytes +from kombu.utils.amq_manager import get_manager + +from . 
import base + +try: + from ssl import SSLError +except ImportError: + class SSLError(Exception): # noqa + pass +from struct import unpack + + +class NA(object): + pass + +try: + from amqplib import client_0_8 as amqp + from amqplib.client_0_8 import transport + from amqplib.client_0_8.channel import Channel as _Channel + from amqplib.client_0_8.exceptions import AMQPConnectionException + from amqplib.client_0_8.exceptions import AMQPChannelException +except ImportError: # pragma: no cover + + class NAx(object): + pass + amqp = NA + amqp.Connection = NA + transport = _Channel = NA # noqa + # Sphinx crashes if this is NA, must be different class + transport.TCPTransport = transport.SSLTransport = NAx + AMQPConnectionException = AMQPChannelException = NA # noqa + +DEFAULT_PORT = 5672 +HAS_MSG_PEEK = hasattr(socket, 'MSG_PEEK') + +# amqplib's handshake mistakenly identifies as protocol version 1191, +# this breaks in RabbitMQ tip, which no longer falls back to +# 0-8 for unknown ids. +transport.AMQP_PROTOCOL_HEADER = str_to_bytes('AMQP\x01\x01\x08\x00') + + +# - fixes warnings when socket is not connected. +class TCPTransport(transport.TCPTransport): + + def read_frame(self): + frame_type, channel, size = unpack('>BHI', self._read(7, True)) + payload = self._read(size) + ch = ord(self._read(1)) + if ch == 206: # '\xce' + return frame_type, channel, payload + else: + raise Exception( + 'Framing Error, received 0x%02x while expecting 0xce' % ch) + + def _read(self, n, initial=False): + read_buffer = self._read_buffer + while len(read_buffer) < n: + try: + s = self.sock.recv(n - len(read_buffer)) + except socket.error as exc: + if not initial and exc.errno in (errno.EAGAIN, errno.EINTR): + continue + raise + if not s: + raise IOError('Socket closed') + read_buffer += s + + result = read_buffer[:n] + self._read_buffer = read_buffer[n:] + + return result + + def __del__(self): + try: + self.close() + except Exception: + pass + finally: + self.sock = None + +transport.TCPTransport = TCPTransport + + +class SSLTransport(transport.SSLTransport): + + def __init__(self, host, connect_timeout, ssl): + if isinstance(ssl, dict): + self.sslopts = ssl + self.sslobj = None + + transport._AbstractTransport.__init__(self, host, connect_timeout) + + def read_frame(self): + frame_type, channel, size = unpack('>BHI', self._read(7, True)) + payload = self._read(size) + ch = ord(self._read(1)) + if ch == 206: # '\xce' + return frame_type, channel, payload + else: + raise Exception( + 'Framing Error, received 0x%02x while expecting 0xce' % ch) + + def _read(self, n, initial=False): + result = '' + + while len(result) < n: + try: + s = self.sslobj.read(n - len(result)) + except socket.error as exc: + if not initial and exc.errno in (errno.EAGAIN, errno.EINTR): + continue + raise + if not s: + raise IOError('Socket closed') + result += s + + return result + + def __del__(self): + try: + self.close() + except Exception: + pass + finally: + self.sock = None +transport.SSLTransport = SSLTransport + + +class Connection(amqp.Connection): # pragma: no cover + connected = True + + def _do_close(self, *args, **kwargs): + # amqplib does not ignore socket errors when connection + # is closed on the remote end. 
+ try: + super(Connection, self)._do_close(*args, **kwargs) + except socket.error: + pass + + def _dispatch_basic_return(self, channel, args, msg): + reply_code = args.read_short() + reply_text = args.read_shortstr() + exchange = args.read_shortstr() + routing_key = args.read_shortstr() + + exc = AMQPChannelException(reply_code, reply_text, (50, 60)) + if channel.events['basic_return']: + for callback in channel.events['basic_return']: + callback(exc, exchange, routing_key, msg) + else: + raise exc + + def __init__(self, *args, **kwargs): + super(Connection, self).__init__(*args, **kwargs) + self._method_override = {(60, 50): self._dispatch_basic_return} + + def drain_events(self, timeout=None): + """Wait for an event on a channel.""" + chanmap = self.channels + chanid, method_sig, args, content = self._wait_multiple( + chanmap, None, timeout=timeout) + + channel = chanmap[chanid] + + if (content and + channel.auto_decode and + hasattr(content, 'content_encoding')): + try: + content.body = content.body.decode(content.content_encoding) + except Exception: + pass + + amqp_method = self._method_override.get(method_sig) or \ + channel._METHOD_MAP.get(method_sig, None) + + if amqp_method is None: + raise Exception('Unknown AMQP method (%d, %d)' % method_sig) + + if content is None: + return amqp_method(channel, args) + else: + return amqp_method(channel, args, content) + + def read_timeout(self, timeout=None): + if timeout is None: + return self.method_reader.read_method() + sock = self.transport.sock + prev = sock.gettimeout() + if prev != timeout: + sock.settimeout(timeout) + try: + try: + return self.method_reader.read_method() + except SSLError as exc: + # http://bugs.python.org/issue10272 + if 'timed out' in str(exc): + raise socket.timeout() + # Non-blocking SSL sockets can throw SSLError + if 'The operation did not complete' in str(exc): + raise socket.timeout() + raise + finally: + if prev != timeout: + sock.settimeout(prev) + + def _wait_multiple(self, channels, allowed_methods, timeout=None): + for channel_id, channel in items(channels): + method_queue = channel.method_queue + for queued_method in method_queue: + method_sig = queued_method[0] + if (allowed_methods is None or + method_sig in allowed_methods or + method_sig == (20, 40)): + method_queue.remove(queued_method) + method_sig, args, content = queued_method + return channel_id, method_sig, args, content + + # Nothing queued, need to wait for a method from the peer + read_timeout = self.read_timeout + wait = self.wait + while 1: + channel, method_sig, args, content = read_timeout(timeout) + + if (channel in channels and + allowed_methods is None or + method_sig in allowed_methods or + method_sig == (20, 40)): + return channel, method_sig, args, content + + # Not the channel and/or method we were looking for. Queue + # this method for later + channels[channel].method_queue.append((method_sig, args, content)) + + # + # If we just queued up a method for channel 0 (the Connection + # itself) it's probably a close method in reaction to some + # error, so deal with it right away. 
+ # + if channel == 0: + wait() + + def channel(self, channel_id=None): + try: + return self.channels[channel_id] + except KeyError: + return Channel(self, channel_id) + + +class Message(base.Message): + + def __init__(self, channel, msg, **kwargs): + props = msg.properties + super(Message, self).__init__( + channel, + body=msg.body, + delivery_tag=msg.delivery_tag, + content_type=props.get('content_type'), + content_encoding=props.get('content_encoding'), + delivery_info=msg.delivery_info, + properties=msg.properties, + headers=props.get('application_headers') or {}, + **kwargs) + + +class Channel(_Channel, base.StdChannel): + Message = Message + events = {'basic_return': set()} + + def __init__(self, *args, **kwargs): + self.no_ack_consumers = set() + super(Channel, self).__init__(*args, **kwargs) + + def prepare_message(self, body, priority=None, content_type=None, + content_encoding=None, headers=None, properties=None): + """Encapsulate data into a AMQP message.""" + return amqp.Message(body, priority=priority, + content_type=content_type, + content_encoding=content_encoding, + application_headers=headers, + **properties) + + def message_to_python(self, raw_message): + """Convert encoded message body back to a Python value.""" + return self.Message(self, raw_message) + + def close(self): + try: + super(Channel, self).close() + finally: + self.connection = None + + def basic_consume(self, *args, **kwargs): + consumer_tag = super(Channel, self).basic_consume(*args, **kwargs) + if kwargs['no_ack']: + self.no_ack_consumers.add(consumer_tag) + return consumer_tag + + def basic_cancel(self, consumer_tag, **kwargs): + self.no_ack_consumers.discard(consumer_tag) + return super(Channel, self).basic_cancel(consumer_tag, **kwargs) + + +class Transport(base.Transport): + Connection = Connection + + default_port = DEFAULT_PORT + + # it's very annoying that amqplib sometimes raises AttributeError + # if the connection is lost, but nothing we can do about that here. 
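+    # AttributeError is therefore included in connection_errors below, so a
+    # lost connection surfaces as a connection error rather than a crash.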
+ connection_errors = ( + base.Transport.connection_errors + ( + AMQPConnectionException, + socket.error, IOError, OSError, AttributeError) + ) + channel_errors = base.Transport.channel_errors + (AMQPChannelException, ) + + driver_name = 'amqplib' + driver_type = 'amqp' + supports_ev = True + + def __init__(self, client, **kwargs): + self.client = client + self.default_port = kwargs.get('default_port') or self.default_port + + if amqp is NA: + raise ImportError('Missing amqplib library (pip install amqplib)') + + def create_channel(self, connection): + return connection.channel() + + def drain_events(self, connection, **kwargs): + return connection.drain_events(**kwargs) + + def establish_connection(self): + """Establish connection to the AMQP broker.""" + conninfo = self.client + for name, default_value in items(self.default_connection_params): + if not getattr(conninfo, name, None): + setattr(conninfo, name, default_value) + if conninfo.hostname == 'localhost': + conninfo.hostname = '127.0.0.1' + conn = self.Connection(host=conninfo.host, + userid=conninfo.userid, + password=conninfo.password, + login_method=conninfo.login_method, + virtual_host=conninfo.virtual_host, + insist=conninfo.insist, + ssl=conninfo.ssl, + connect_timeout=conninfo.connect_timeout) + conn.client = self.client + return conn + + def close_connection(self, connection): + """Close the AMQP broker connection.""" + connection.client = None + connection.close() + + def is_alive(self, connection): + if HAS_MSG_PEEK: + sock = connection.transport.sock + prev = sock.gettimeout() + sock.settimeout(0.0001) + try: + sock.recv(1, socket.MSG_PEEK) + except socket.timeout: + pass + except socket.error: + return False + finally: + sock.settimeout(prev) + return True + + def verify_connection(self, connection): + return connection.channels is not None and self.is_alive(connection) + + def register_with_event_loop(self, connection, loop): + loop.add_reader(connection.method_reader.source.sock, + self.on_readable, connection, loop) + + @property + def default_connection_params(self): + return {'userid': 'guest', 'password': 'guest', + 'port': self.default_port, + 'hostname': 'localhost', 'login_method': 'AMQPLAIN'} + + def get_manager(self, *args, **kwargs): + return get_manager(self.client, *args, **kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/base.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/base.py new file mode 100644 index 0000000..fe232d8 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/base.py @@ -0,0 +1,175 @@ +""" +kombu.transport.base +==================== + +Base transport interface. 
+ +""" +from __future__ import absolute_import + +import errno +import socket + +from amqp.exceptions import RecoverableConnectionError + +from kombu.exceptions import ChannelError, ConnectionError +from kombu.message import Message +from kombu.utils import cached_property +from kombu.utils.compat import get_errno + +__all__ = ['Message', 'StdChannel', 'Management', 'Transport'] + + +def _LeftBlank(obj, method): + return NotImplementedError( + 'Transport {0.__module__}.{0.__name__} does not implement {1}'.format( + obj.__class__, method)) + + +class StdChannel(object): + no_ack_consumers = None + + def Consumer(self, *args, **kwargs): + from kombu.messaging import Consumer + return Consumer(self, *args, **kwargs) + + def Producer(self, *args, **kwargs): + from kombu.messaging import Producer + return Producer(self, *args, **kwargs) + + def get_bindings(self): + raise _LeftBlank(self, 'get_bindings') + + def after_reply_message_received(self, queue): + """reply queue semantics: can be used to delete the queue + after transient reply message received.""" + pass + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + + +class Management(object): + + def __init__(self, transport): + self.transport = transport + + def get_bindings(self): + raise _LeftBlank(self, 'get_bindings') + + +class Transport(object): + """Base class for transports.""" + Management = Management + + #: The :class:`~kombu.Connection` owning this instance. + client = None + + #: Set to True if :class:`~kombu.Connection` should pass the URL + #: unmodified. + can_parse_url = False + + #: Default port used when no port has been specified. + default_port = None + + #: Tuple of errors that can happen due to connection failure. + connection_errors = (ConnectionError, ) + + #: Tuple of errors that can happen due to channel/method failure. + channel_errors = (ChannelError, ) + + #: Type of driver, can be used to separate transports + #: using the AMQP protocol (driver_type: 'amqp'), + #: Redis (driver_type: 'redis'), etc... + driver_type = 'N/A' + + #: Name of driver library (e.g. 'py-amqp', 'redis', 'beanstalkc'). + driver_name = 'N/A' + + #: Whether this transports support heartbeats, + #: and that the :meth:`heartbeat_check` method has any effect. + supports_heartbeats = False + + #: Set to true if the transport supports the AIO interface. 
+ supports_ev = False + + __reader = None + + def __init__(self, client, **kwargs): + self.client = client + + def establish_connection(self): + raise _LeftBlank(self, 'establish_connection') + + def close_connection(self, connection): + raise _LeftBlank(self, 'close_connection') + + def create_channel(self, connection): + raise _LeftBlank(self, 'create_channel') + + def close_channel(self, connection): + raise _LeftBlank(self, 'close_channel') + + def drain_events(self, connection, **kwargs): + raise _LeftBlank(self, 'drain_events') + + def heartbeat_check(self, connection, rate=2): + pass + + def driver_version(self): + return 'N/A' + + def get_heartbeat_interval(self, connection): + return 0 + + def register_with_event_loop(self, loop): + pass + + def unregister_from_event_loop(self, loop): + pass + + def verify_connection(self, connection): + return True + + def _make_reader(self, connection, timeout=socket.timeout, + error=socket.error, get_errno=get_errno, + _unavail=(errno.EAGAIN, errno.EINTR)): + drain_events = connection.drain_events + + def _read(loop): + if not connection.connected: + raise RecoverableConnectionError('Socket was disconnected') + try: + drain_events(timeout=0) + except timeout: + return + except error as exc: + if get_errno(exc) in _unavail: + return + raise + loop.call_soon(_read, loop) + + return _read + + def qos_semantics_matches_spec(self, connection): + return True + + def on_readable(self, connection, loop): + reader = self.__reader + if reader is None: + reader = self.__reader = self._make_reader(connection) + reader(loop) + + @property + def default_connection_params(self): + return {} + + def get_manager(self, *args, **kwargs): + return self.Management(self) + + @cached_property + def manager(self): + return self.get_manager() diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/beanstalk.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/beanstalk.py new file mode 100644 index 0000000..4e73bbc --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/beanstalk.py @@ -0,0 +1,155 @@ +""" +kombu.transport.beanstalk +========================= + +Beanstalk transport. + +:copyright: (c) 2010 - 2013 by David Ziegler. +:license: BSD, see LICENSE for more details. + +""" +from __future__ import absolute_import + +import socket + +from anyjson import loads, dumps + +from kombu.five import Empty +from kombu.utils.encoding import bytes_to_str + +from . 
import virtual + +try: + import beanstalkc +except ImportError: # pragma: no cover + beanstalkc = None # noqa + +DEFAULT_PORT = 11300 + +__author__ = 'David Ziegler ' + + +class Channel(virtual.Channel): + _client = None + + def _parse_job(self, job): + item, dest = None, None + if job: + try: + item = loads(bytes_to_str(job.body)) + dest = job.stats()['tube'] + except Exception: + job.bury() + else: + job.delete() + else: + raise Empty() + return item, dest + + def _put(self, queue, message, **kwargs): + extra = {} + priority = message['properties']['delivery_info']['priority'] + ttr = message['properties'].get('ttr') + if ttr is not None: + extra['ttr'] = ttr + + self.client.use(queue) + self.client.put(dumps(message), priority=priority, **extra) + + def _get(self, queue): + if queue not in self.client.watching(): + self.client.watch(queue) + + [self.client.ignore(active) for active in self.client.watching() + if active != queue] + + job = self.client.reserve(timeout=1) + item, dest = self._parse_job(job) + return item + + def _get_many(self, queues, timeout=1): + # timeout of None will cause beanstalk to timeout waiting + # for a new request + if timeout is None: + timeout = 1 + + watching = self.client.watching() + + [self.client.watch(active) for active in queues + if active not in watching] + + [self.client.ignore(active) for active in watching + if active not in queues] + + job = self.client.reserve(timeout=timeout) + return self._parse_job(job) + + def _purge(self, queue): + if queue not in self.client.watching(): + self.client.watch(queue) + + [self.client.ignore(active) + for active in self.client.watching() + if active != queue] + count = 0 + while 1: + job = self.client.reserve(timeout=1) + if job: + job.delete() + count += 1 + else: + break + return count + + def _size(self, queue): + return 0 + + def _open(self): + conninfo = self.connection.client + host = conninfo.hostname or 'localhost' + port = conninfo.port or DEFAULT_PORT + conn = beanstalkc.Connection(host=host, port=port) + conn.connect() + return conn + + def close(self): + if self._client is not None: + return self._client.close() + super(Channel, self).close() + + @property + def client(self): + if self._client is None: + self._client = self._open() + return self._client + + +class Transport(virtual.Transport): + Channel = Channel + + polling_interval = 1 + default_port = DEFAULT_PORT + connection_errors = ( + virtual.Transport.connection_errors + ( + socket.error, IOError, + getattr(beanstalkc, 'SocketError', None), + ) + ) + channel_errors = ( + virtual.Transport.channel_errors + ( + socket.error, IOError, + getattr(beanstalkc, 'SocketError', None), + getattr(beanstalkc, 'BeanstalkcException', None), + ) + ) + driver_type = 'beanstalk' + driver_name = 'beanstalkc' + + def __init__(self, *args, **kwargs): + if beanstalkc is None: + raise ImportError( + 'Missing beanstalkc library (pip install beanstalkc)') + super(Transport, self).__init__(*args, **kwargs) + + def driver_version(self): + return beanstalkc.__version__ diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/couchdb.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/couchdb.py new file mode 100644 index 0000000..99d1362 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/couchdb.py @@ -0,0 +1,142 @@ +""" +kombu.transport.couchdb +======================= + +CouchDB transport. + +:copyright: (c) 2010 - 2013 by David Clymer. +:license: BSD, see LICENSE for more details. 
+ +""" +from __future__ import absolute_import + +import socket + +from anyjson import loads, dumps + +from kombu.five import Empty +from kombu.utils import uuid4 +from kombu.utils.encoding import bytes_to_str + +from . import virtual + +try: + import couchdb +except ImportError: # pragma: no cover + couchdb = None # noqa + +DEFAULT_PORT = 5984 +DEFAULT_DATABASE = 'kombu_default' + +__author__ = 'David Clymer ' + + +def create_message_view(db): + from couchdb import design + + view = design.ViewDefinition('kombu', 'messages', """ + function (doc) { + if (doc.queue && doc.payload) + emit(doc.queue, doc); + } + """) + if not view.get_doc(db): + view.sync(db) + + +class Channel(virtual.Channel): + _client = None + + view_created = False + + def _put(self, queue, message, **kwargs): + self.client.save({'_id': uuid4().hex, + 'queue': queue, + 'payload': dumps(message)}) + + def _get(self, queue): + result = self._query(queue, limit=1) + if not result: + raise Empty() + + item = result.rows[0].value + self.client.delete(item) + return loads(bytes_to_str(item['payload'])) + + def _purge(self, queue): + result = self._query(queue) + for item in result: + self.client.delete(item.value) + return len(result) + + def _size(self, queue): + return len(self._query(queue)) + + def _open(self): + conninfo = self.connection.client + dbname = conninfo.virtual_host + proto = conninfo.ssl and 'https' or 'http' + if not dbname or dbname == '/': + dbname = DEFAULT_DATABASE + port = conninfo.port or DEFAULT_PORT + server = couchdb.Server('%s://%s:%s/' % (proto, + conninfo.hostname, + port)) + # Use username and password if avaliable + try: + if conninfo.userid: + server.resource.credentials = (conninfo.userid, + conninfo.password) + except AttributeError: + pass + try: + return server[dbname] + except couchdb.http.ResourceNotFound: + return server.create(dbname) + + def _query(self, queue, **kwargs): + if not self.view_created: + # if the message view is not yet set up, we'll need it now. 
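+            # create_message_view() is idempotent: it only syncs the
+            # 'kombu/messages' design document when it does not exist yet.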
+ create_message_view(self.client) + self.view_created = True + return self.client.view('kombu/messages', key=queue, **kwargs) + + @property + def client(self): + if self._client is None: + self._client = self._open() + return self._client + + +class Transport(virtual.Transport): + Channel = Channel + + polling_interval = 1 + default_port = DEFAULT_PORT + connection_errors = ( + virtual.Transport.connection_errors + ( + socket.error, + getattr(couchdb, 'HTTPError', None), + getattr(couchdb, 'ServerError', None), + getattr(couchdb, 'Unauthorized', None), + ) + ) + channel_errors = ( + virtual.Transport.channel_errors + ( + getattr(couchdb, 'HTTPError', None), + getattr(couchdb, 'ServerError', None), + getattr(couchdb, 'PreconditionFailed', None), + getattr(couchdb, 'ResourceConflict', None), + getattr(couchdb, 'ResourceNotFound', None), + ) + ) + driver_type = 'couchdb' + driver_name = 'couchdb' + + def __init__(self, *args, **kwargs): + if couchdb is None: + raise ImportError('Missing couchdb library (pip install couchdb)') + super(Transport, self).__init__(*args, **kwargs) + + def driver_version(self): + return couchdb.__version__ diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/django/__init__.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/django/__init__.py new file mode 100644 index 0000000..d3d5362 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/django/__init__.py @@ -0,0 +1,83 @@ +"""Kombu transport using the Django database as a message store.""" +from __future__ import absolute_import + +from anyjson import loads, dumps + +from django.conf import settings +from django.core import exceptions as errors + +from kombu.five import Empty +from kombu.transport import virtual +from kombu.utils import cached_property, symbol_by_name +from kombu.utils.encoding import bytes_to_str + +try: + from django.apps import AppConfig +except ImportError: # pragma: no cover + pass +else: + class KombuAppConfig(AppConfig): + name = 'kombu.transport.django' + label = name.replace('.', '_') + verbose_name = 'Message queue' + default_app_config = 'kombu.transport.django.KombuAppConfig' + +VERSION = (1, 0, 0) +__version__ = '.'.join(map(str, VERSION)) + +POLLING_INTERVAL = getattr(settings, 'KOMBU_POLLING_INTERVAL', + getattr(settings, 'DJKOMBU_POLLING_INTERVAL', 5.0)) + + +class Channel(virtual.Channel): + queue_model = 'kombu.transport.django.models:Queue' + + def _new_queue(self, queue, **kwargs): + self.Queue.objects.get_or_create(name=queue) + + def _put(self, queue, message, **kwargs): + self.Queue.objects.publish(queue, dumps(message)) + + def basic_consume(self, queue, *args, **kwargs): + qinfo = self.state.bindings[queue] + exchange = qinfo[0] + if self.typeof(exchange).type == 'fanout': + return + super(Channel, self).basic_consume(queue, *args, **kwargs) + + def _get(self, queue): + m = self.Queue.objects.fetch(queue) + if m: + return loads(bytes_to_str(m)) + raise Empty() + + def _size(self, queue): + return self.Queue.objects.size(queue) + + def _purge(self, queue): + return self.Queue.objects.purge(queue) + + def refresh_connection(self): + from django import db + db.close_connection() + + @cached_property + def Queue(self): + return symbol_by_name(self.queue_model) + + +class Transport(virtual.Transport): + Channel = Channel + + default_port = 0 + polling_interval = POLLING_INTERVAL + channel_errors = ( + virtual.Transport.channel_errors + ( + errors.ObjectDoesNotExist, errors.MultipleObjectsReturned) + ) + driver_type = 'sql' + 
driver_name = 'django' + + def driver_version(self): + import django + return '.'.join(map(str, django.VERSION)) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/django/management/__init__.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/django/management/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/django/management/commands/__init__.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/django/management/commands/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/django/management/commands/clean_kombu_messages.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/django/management/commands/clean_kombu_messages.py new file mode 100644 index 0000000..c82ba9f --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/django/management/commands/clean_kombu_messages.py @@ -0,0 +1,22 @@ +from __future__ import absolute_import + +from django.core.management.base import BaseCommand + + +def pluralize(desc, value): + if value > 1: + return desc + 's' + return desc + + +class Command(BaseCommand): + requires_model_validation = True + + def handle(self, *args, **options): + from kombu.transport.django.models import Message + + count = Message.objects.filter(visible=False).count() + + print('Removing {0} invisible {1} from database... '.format( + count, pluralize('message', count))) + Message.objects.cleanup() diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/django/managers.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/django/managers.py new file mode 100644 index 0000000..d33d290 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/django/managers.py @@ -0,0 +1,95 @@ +from __future__ import absolute_import + +from functools import wraps + +from django.db import transaction, connection, models +try: + from django.db import connections, router +except ImportError: # pre-Django 1.2 + connections = router = None # noqa + + +try: + transaction.atomic +except AttributeError: + commit_on_success = transaction.commit_on_success +else: + def commit_on_success(fun): + @wraps(fun) + def _commit(*args, **kwargs): + with transaction.atomic(): + return fun(*args, **kwargs) + return _commit + + +class QueueManager(models.Manager): + + def publish(self, queue_name, payload): + queue, created = self.get_or_create(name=queue_name) + queue.messages.create(payload=payload) + + def fetch(self, queue_name): + try: + queue = self.get(name=queue_name) + except self.model.DoesNotExist: + return + + return queue.messages.pop() + + def size(self, queue_name): + return self.get(name=queue_name).messages.count() + + def purge(self, queue_name): + try: + queue = self.get(name=queue_name) + except self.model.DoesNotExist: + return + + messages = queue.messages.all() + count = messages.count() + messages.delete() + return count + + +def select_for_update(qs): + if connection.vendor == 'oracle': + return qs + try: + return qs.select_for_update() + except AttributeError: + return qs + + +class MessageManager(models.Manager): + _messages_received = [0] + cleanup_every = 10 + + @commit_on_success + def pop(self): + try: + resultset = select_for_update( + self.filter(visible=True).order_by('sent_at', 'id') + ) + result = resultset[0:1].get() + result.visible = False + result.save() + recv = self.__class__._messages_received + recv[0] += 1 + if not recv[0] % self.cleanup_every: + 
self.cleanup() + return result.payload + except self.model.DoesNotExist: + pass + + def cleanup(self): + cursor = self.connection_for_write().cursor() + cursor.execute( + 'DELETE FROM %s WHERE visible=%%s' % ( + self.model._meta.db_table, ), + (False, ) + ) + + def connection_for_write(self): + if connections: + return connections[router.db_for_write(self.model)] + return connection diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/django/migrations/0001_initial.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/django/migrations/0001_initial.py new file mode 100644 index 0000000..789d988 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/django/migrations/0001_initial.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import, unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ] + + operations = [ + migrations.CreateModel( + name='Message', + fields=[ + ('id', models.AutoField( + verbose_name='ID', serialize=False, + auto_created=True, primary_key=True)), + ('visible', models.BooleanField(default=True, db_index=True)), + ('sent_at', models.DateTimeField( + db_index=True, auto_now_add=True, null=True)), + ('payload', models.TextField(verbose_name='payload')), + ], + options={ + 'db_table': 'djkombu_message', + 'verbose_name': 'message', + 'verbose_name_plural': 'messages', + }, + ), + migrations.CreateModel( + name='Queue', + fields=[ + ('id', models.AutoField( + verbose_name='ID', serialize=False, + auto_created=True, primary_key=True)), + ('name', models.CharField( + unique=True, max_length=200, verbose_name='name')), + ], + options={ + 'db_table': 'djkombu_queue', + 'verbose_name': 'queue', + 'verbose_name_plural': 'queues', + }, + ), + migrations.AddField( + model_name='message', + name='queue', + field=models.ForeignKey(on_delete=models.PROTECT, related_name='messages', to='kombu_transport_django.Queue'), + ), + ] diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/django/migrations/0002_auto_20181021_1329.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/django/migrations/0002_auto_20181021_1329.py new file mode 100644 index 0000000..c4231dd --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/django/migrations/0002_auto_20181021_1329.py @@ -0,0 +1,19 @@ +# Generated by Django 2.1 on 2018-10-21 11:29 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('kombu_transport_django', '0001_initial'), + ] + + operations = [ + migrations.AlterField( + model_name='message', + name='queue', + field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='messages', to='kombu_transport_django.Queue'), + ), + ] diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/django/migrations/__init__.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/django/migrations/__init__.py new file mode 100644 index 0000000..6b92838 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/django/migrations/__init__.py @@ -0,0 +1,16 @@ +from __future__ import absolute_import + +SOUTH_ERROR_MESSAGE = """ +For South support, customize the SOUTH_MIGRATION_MODULES setting +to point to the correct migrations module: + + SOUTH_MIGRATION_MODULES = { + 'kombu_transport_django': 'kombu.transport.django.south_migrations', + } +""" + +try: + from django.db import migrations # noqa 
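+    # Django 1.7+ provides native migrations, so nothing further is needed
+    # when the import above succeeds.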
+except ImportError: + from django.core.exceptions import ImproperlyConfigured + raise ImproperlyConfigured(SOUTH_ERROR_MESSAGE) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/django/models.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/django/models.py new file mode 100644 index 0000000..42e24af --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/django/models.py @@ -0,0 +1,38 @@ +from __future__ import absolute_import + +import django + +from django.db import models +from django.utils.translation import ugettext_lazy as _ + +from .managers import QueueManager, MessageManager + + +class Queue(models.Model): + name = models.CharField(_('name'), max_length=200, unique=True) + + objects = QueueManager() + + class Meta: + if django.VERSION >= (1, 7): + app_label = 'kombu_transport_django' + db_table = 'djkombu_queue' + verbose_name = _('queue') + verbose_name_plural = _('queues') + + +class Message(models.Model): + visible = models.BooleanField(default=True, db_index=True) + sent_at = models.DateTimeField(null=True, blank=True, db_index=True, + auto_now_add=True) + payload = models.TextField(_('payload'), null=False) + queue = models.ForeignKey(Queue, related_name='messages', on_delete=models.DO_NOTHING) + + objects = MessageManager() + + class Meta: + if django.VERSION >= (1, 7): + app_label = 'kombu_transport_django' + db_table = 'djkombu_message' + verbose_name = _('message') + verbose_name_plural = _('messages') \ No newline at end of file diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/django/south_migrations/0001_initial.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/django/south_migrations/0001_initial.py new file mode 100644 index 0000000..ea1edb0 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/django/south_migrations/0001_initial.py @@ -0,0 +1,57 @@ +# encoding: utf-8 +from __future__ import absolute_import + +# flake8: noqa +import datetime +from south.db import db +from south.v2 import SchemaMigration +from django.db import models + +class Migration(SchemaMigration): + + def forwards(self, orm): + + # Adding model 'Queue' + db.create_table('djkombu_queue', ( + ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), + ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=200)), + )) + db.send_create_signal('django', ['Queue']) + + # Adding model 'Message' + db.create_table('djkombu_message', ( + ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), + ('visible', self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True)), + ('sent_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, db_index=True, blank=True)), + ('payload', self.gf('django.db.models.fields.TextField')()), + ('queue', self.gf('django.db.models.fields.related.ForeignKey')(related_name='messages', to=orm['django.Queue'])), + )) + db.send_create_signal('django', ['Message']) + + + def backwards(self, orm): + + # Deleting model 'Queue' + db.delete_table('djkombu_queue') + + # Deleting model 'Message' + db.delete_table('djkombu_message') + + + models = { + 'django.message': { + 'Meta': {'object_name': 'Message', 'db_table': "'djkombu_message'"}, + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'payload': ('django.db.models.fields.TextField', [], {}), + 'queue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'to': "orm['django.Queue']"}), + 
'sent_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}), + 'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}) + }, + 'django.queue': { + 'Meta': {'object_name': 'Queue', 'db_table': "'djkombu_queue'"}, + 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), + 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}) + } + } + + complete_apps = ['django'] diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/django/south_migrations/__init__.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/django/south_migrations/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/filesystem.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/filesystem.py new file mode 100644 index 0000000..c83dcdc --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/filesystem.py @@ -0,0 +1,193 @@ +""" +kombu.transport.filesystem +========================== + +Transport using the file system as the message store. + +""" +from __future__ import absolute_import + +from anyjson import loads, dumps + +import os +import shutil +import uuid +import tempfile + +from . import virtual +from kombu.exceptions import ChannelError +from kombu.five import Empty, monotonic +from kombu.utils import cached_property +from kombu.utils.encoding import bytes_to_str, str_to_bytes + +VERSION = (1, 0, 0) +__version__ = '.'.join(map(str, VERSION)) + +# needs win32all to work on Windows +if os.name == 'nt': + + import win32con + import win32file + import pywintypes + + LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK + # 0 is the default + LOCK_SH = 0 # noqa + LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY # noqa + __overlapped = pywintypes.OVERLAPPED() + + def lock(file, flags): + hfile = win32file._get_osfhandle(file.fileno()) + win32file.LockFileEx(hfile, flags, 0, 0xffff0000, __overlapped) + + def unlock(file): + hfile = win32file._get_osfhandle(file.fileno()) + win32file.UnlockFileEx(hfile, 0, 0xffff0000, __overlapped) + +elif os.name == 'posix': + + import fcntl + from fcntl import LOCK_EX, LOCK_SH, LOCK_NB # noqa + + def lock(file, flags): # noqa + fcntl.flock(file.fileno(), flags) + + def unlock(file): # noqa + fcntl.flock(file.fileno(), fcntl.LOCK_UN) +else: + raise RuntimeError( + 'Filesystem plugin only defined for NT and POSIX platforms') + + +class Channel(virtual.Channel): + + def _put(self, queue, payload, **kwargs): + """Put `message` onto `queue`.""" + + filename = '%s_%s.%s.msg' % (int(round(monotonic() * 1000)), + uuid.uuid4(), queue) + filename = os.path.join(self.data_folder_out, filename) + + try: + f = open(filename, 'wb') + lock(f, LOCK_EX) + f.write(str_to_bytes(dumps(payload))) + except (IOError, OSError): + raise ChannelError( + 'Cannot add file {0!r} to directory'.format(filename)) + finally: + unlock(f) + f.close() + + def _get(self, queue): + """Get next message from `queue`.""" + + queue_find = '.' 
+ queue + '.msg' + folder = os.listdir(self.data_folder_in) + folder = sorted(folder) + while len(folder) > 0: + filename = folder.pop(0) + + # only handle message for the requested queue + if filename.find(queue_find) < 0: + continue + + if self.store_processed: + processed_folder = self.processed_folder + else: + processed_folder = tempfile.gettempdir() + + try: + # move the file to the tmp/processed folder + shutil.move(os.path.join(self.data_folder_in, filename), + processed_folder) + except IOError: + pass # file could be locked, or removed in meantime so ignore + + filename = os.path.join(processed_folder, filename) + try: + f = open(filename, 'rb') + payload = f.read() + f.close() + if not self.store_processed: + os.remove(filename) + except (IOError, OSError): + raise ChannelError( + 'Cannot read file {0!r} from queue.'.format(filename)) + + return loads(bytes_to_str(payload)) + + raise Empty() + + def _purge(self, queue): + """Remove all messages from `queue`.""" + count = 0 + queue_find = '.' + queue + '.msg' + + folder = os.listdir(self.data_folder_in) + while len(folder) > 0: + filename = folder.pop() + try: + # only purge messages for the requested queue + if filename.find(queue_find) < 0: + continue + + filename = os.path.join(self.data_folder_in, filename) + os.remove(filename) + + count += 1 + + except OSError: + # we simply ignore its existence, as it was probably + # processed by another worker + pass + + return count + + def _size(self, queue): + """Return the number of messages in `queue` as an :class:`int`.""" + count = 0 + + queue_find = '.{0}.msg'.format(queue) + folder = os.listdir(self.data_folder_in) + while len(folder) > 0: + filename = folder.pop() + + # only handle message for the requested queue + if filename.find(queue_find) < 0: + continue + + count += 1 + + return count + + @property + def transport_options(self): + return self.connection.client.transport_options + + @cached_property + def data_folder_in(self): + return self.transport_options.get('data_folder_in', 'data_in') + + @cached_property + def data_folder_out(self): + return self.transport_options.get('data_folder_out', 'data_out') + + @cached_property + def store_processed(self): + return self.transport_options.get('store_processed', False) + + @cached_property + def processed_folder(self): + return self.transport_options.get('processed_folder', 'processed') + + +class Transport(virtual.Transport): + Channel = Channel + + default_port = 0 + driver_type = 'filesystem' + driver_name = 'filesystem' + + def driver_version(self): + return 'N/A' diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/librabbitmq.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/librabbitmq.py new file mode 100644 index 0000000..f0de360 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/librabbitmq.py @@ -0,0 +1,176 @@ +""" +kombu.transport.librabbitmq +=========================== + +`librabbitmq`_ transport. + +.. _`librabbitmq`: http://pypi.python.org/librabbitmq/ + +""" +from __future__ import absolute_import + +import os +import socket +import warnings + +import librabbitmq as amqp +from librabbitmq import ChannelError, ConnectionError + +from kombu.five import items, values +from kombu.utils.amq_manager import get_manager +from kombu.utils.text import version_string_as_tuple + +from . 
import base + +W_VERSION = """ + librabbitmq version too old to detect RabbitMQ version information + so make sure you are using librabbitmq 1.5 when using rabbitmq > 3.3 +""" +DEFAULT_PORT = 5672 +DEFAULT_SSL_PORT = 5671 + +NO_SSL_ERROR = """\ +ssl not supported by librabbitmq, please use pyamqp:// or stunnel\ +""" + + +class Message(base.Message): + + def __init__(self, channel, props, info, body): + super(Message, self).__init__( + channel, + body=body, + delivery_info=info, + properties=props, + delivery_tag=info.get('delivery_tag'), + content_type=props.get('content_type'), + content_encoding=props.get('content_encoding'), + headers=props.get('headers')) + + +class Channel(amqp.Channel, base.StdChannel): + Message = Message + + def prepare_message(self, body, priority=None, + content_type=None, content_encoding=None, + headers=None, properties=None): + """Encapsulate data into a AMQP message.""" + properties = properties if properties is not None else {} + properties.update({'content_type': content_type, + 'content_encoding': content_encoding, + 'headers': headers, + 'priority': priority}) + return body, properties + + +class Connection(amqp.Connection): + Channel = Channel + Message = Message + + +class Transport(base.Transport): + Connection = Connection + + default_port = DEFAULT_PORT + default_ssl_port = DEFAULT_SSL_PORT + + connection_errors = ( + base.Transport.connection_errors + ( + ConnectionError, socket.error, IOError, OSError) + ) + channel_errors = ( + base.Transport.channel_errors + (ChannelError, ) + ) + driver_type = 'amqp' + driver_name = 'librabbitmq' + + supports_ev = True + + def __init__(self, client, **kwargs): + self.client = client + self.default_port = kwargs.get('default_port') or self.default_port + self.default_ssl_port = (kwargs.get('default_ssl_port') or + self.default_ssl_port) + self.__reader = None + + def driver_version(self): + return amqp.__version__ + + def create_channel(self, connection): + return connection.channel() + + def drain_events(self, connection, **kwargs): + return connection.drain_events(**kwargs) + + def establish_connection(self): + """Establish connection to the AMQP broker.""" + conninfo = self.client + for name, default_value in items(self.default_connection_params): + if not getattr(conninfo, name, None): + setattr(conninfo, name, default_value) + if conninfo.ssl: + raise NotImplementedError(NO_SSL_ERROR) + opts = dict({ + 'host': conninfo.host, + 'userid': conninfo.userid, + 'password': conninfo.password, + 'virtual_host': conninfo.virtual_host, + 'login_method': conninfo.login_method, + 'insist': conninfo.insist, + 'ssl': conninfo.ssl, + 'connect_timeout': conninfo.connect_timeout, + }, **conninfo.transport_options or {}) + conn = self.Connection(**opts) + conn.client = self.client + self.client.drain_events = conn.drain_events + return conn + + def close_connection(self, connection): + """Close the AMQP broker connection.""" + self.client.drain_events = None + connection.close() + + def _collect(self, connection): + if connection is not None: + for channel in values(connection.channels): + channel.connection = None + try: + os.close(connection.fileno()) + except OSError: + pass + connection.channels.clear() + connection.callbacks.clear() + self.client.drain_events = None + self.client = None + + def verify_connection(self, connection): + return connection.connected + + def register_with_event_loop(self, connection, loop): + loop.add_reader( + connection.fileno(), self.on_readable, connection, loop, + ) + + def 
get_manager(self, *args, **kwargs): + return get_manager(self.client, *args, **kwargs) + + def qos_semantics_matches_spec(self, connection): + try: + props = connection.server_properties + except AttributeError: + warnings.warn(UserWarning(W_VERSION)) + else: + if props.get('product') == 'RabbitMQ': + return version_string_as_tuple(props['version']) < (3, 3) + return True + + @property + def default_connection_params(self): + return { + 'userid': 'guest', + 'password': 'guest', + 'port': (self.default_ssl_port if self.client.ssl + else self.default_port), + 'hostname': 'localhost', + 'login_method': 'AMQPLAIN', + } diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/memory.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/memory.py new file mode 100644 index 0000000..b1ba70f --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/memory.py @@ -0,0 +1,77 @@ +""" +kombu.transport.memory +====================== + +In-memory transport. + +""" +from __future__ import absolute_import + +from kombu.five import Queue, values + +from . import virtual + + +class Channel(virtual.Channel): + queues = {} + do_restore = False + supports_fanout = True + + def _has_queue(self, queue, **kwargs): + return queue in self.queues + + def _new_queue(self, queue, **kwargs): + if queue not in self.queues: + self.queues[queue] = Queue() + + def _get(self, queue, timeout=None): + return self._queue_for(queue).get(block=False) + + def _queue_for(self, queue): + if queue not in self.queues: + self.queues[queue] = Queue() + return self.queues[queue] + + def _queue_bind(self, *args): + pass + + def _put_fanout(self, exchange, message, routing_key=None, **kwargs): + for queue in self._lookup(exchange, routing_key): + self._queue_for(queue).put(message) + + def _put(self, queue, message, **kwargs): + self._queue_for(queue).put(message) + + def _size(self, queue): + return self._queue_for(queue).qsize() + + def _delete(self, queue, *args): + self.queues.pop(queue, None) + + def _purge(self, queue): + q = self._queue_for(queue) + size = q.qsize() + q.queue.clear() + return size + + def close(self): + super(Channel, self).close() + for queue in values(self.queues): + queue.empty() + self.queues = {} + + def after_reply_message_received(self, queue): + pass + + +class Transport(virtual.Transport): + Channel = Channel + + #: memory backend state is global. + state = virtual.BrokerState() + + driver_type = 'memory' + driver_name = 'memory' + + def driver_version(self): + return 'N/A' diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/mongodb.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/mongodb.py new file mode 100644 index 0000000..65b4a6d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/mongodb.py @@ -0,0 +1,338 @@ +""" +kombu.transport.mongodb +======================= + +MongoDB transport. + +:copyright: (c) 2010 - 2013 by Flavio Percoco Premoli. +:license: BSD, see LICENSE for more details. + +""" +from __future__ import absolute_import + +import pymongo + +from pymongo import errors +from anyjson import loads, dumps +from pymongo import MongoClient, uri_parser + +from kombu.five import Empty +from kombu.syn import _detect_environment +from kombu.utils.encoding import bytes_to_str + +from . 
import virtual + +try: + from pymongo.cursor import CursorType +except ImportError: + class CursorType(object): # noqa + pass + +DEFAULT_HOST = '127.0.0.1' +DEFAULT_PORT = 27017 + +DEFAULT_MESSAGES_COLLECTION = 'messages' +DEFAULT_ROUTING_COLLECTION = 'messages.routing' +DEFAULT_BROADCAST_COLLECTION = 'messages.broadcast' + + +class BroadcastCursor(object): + """Cursor for broadcast queues.""" + + def __init__(self, cursor): + self._cursor = cursor + + self.purge(rewind=False) + + def get_size(self): + return self._cursor.count() - self._offset + + def close(self): + self._cursor.close() + + def purge(self, rewind=True): + if rewind: + self._cursor.rewind() + + # Fast forward the cursor past old events + self._offset = self._cursor.count() + self._cursor = self._cursor.skip(self._offset) + + def __iter__(self): + return self + + def __next__(self): + while True: + try: + msg = next(self._cursor) + except pymongo.errors.OperationFailure as exc: + # In some cases tailed cursor can become invalid + # and have to be reinitalized + if 'not valid at server' in exc.message: + self.purge() + + continue + + raise + else: + break + + self._offset += 1 + + return msg + next = __next__ + + +class Channel(virtual.Channel): + _client = None + supports_fanout = True + _fanout_queues = {} + + def __init__(self, *vargs, **kwargs): + super(Channel, self).__init__(*vargs, **kwargs) + + self._broadcast_cursors = {} + + # Evaluate connection + self._create_client() + + def _new_queue(self, queue, **kwargs): + pass + + def _get(self, queue): + if queue in self._fanout_queues: + try: + msg = next(self.get_broadcast_cursor(queue)) + except StopIteration: + msg = None + else: + msg = self.get_messages().find_and_modify( + query={'queue': queue}, + sort={'_id': pymongo.ASCENDING}, + remove=True, + ) + + if msg is None: + raise Empty() + + return loads(bytes_to_str(msg['payload'])) + + def _size(self, queue): + if queue in self._fanout_queues: + return self.get_broadcast_cursor(queue).get_size() + + return self.get_messages().find({'queue': queue}).count() + + def _put(self, queue, message, **kwargs): + self.get_messages().insert({'payload': dumps(message), + 'queue': queue}) + + def _purge(self, queue): + size = self._size(queue) + + if queue in self._fanout_queues: + self.get_broadcaset_cursor(queue).purge() + else: + self.get_messages().remove({'queue': queue}) + + return size + + def _parse_uri(self, scheme='mongodb://'): + # See mongodb uri documentation: + # http://docs.mongodb.org/manual/reference/connection-string/ + client = self.connection.client + hostname = client.hostname + + if not hostname.startswith(scheme): + hostname = scheme + hostname + + if not hostname[len(scheme):]: + hostname += DEFAULT_HOST + + if client.userid and '@' not in hostname: + head, tail = hostname.split('://') + + credentials = client.userid + if client.password: + credentials += ':' + client.password + + hostname = head + '://' + credentials + '@' + tail + + port = client.port if client.port is not None else DEFAULT_PORT + + parsed = uri_parser.parse_uri(hostname, port) + + dbname = parsed['database'] or client.virtual_host + + if dbname in ('/', None): + dbname = 'kombu_default' + + options = { + 'auto_start_request': True, + 'ssl': client.ssl, + 'connectTimeoutMS': (int(client.connect_timeout * 1000) + if client.connect_timeout else None), + } + options.update(client.transport_options) + options.update(parsed['options']) + + return hostname, dbname, options + + def _prepare_client_options(self, options): + if 
pymongo.version_tuple >= (3, ): + options.pop('auto_start_request', None) + return options + + def _open(self, scheme='mongodb://'): + hostname, dbname, options = self._parse_uri(scheme=scheme) + + conf = self._prepare_client_options(options) + conf['host'] = hostname + + env = _detect_environment() + if env == 'gevent': + from gevent import monkey + monkey.patch_all() + elif env == 'eventlet': + from eventlet import monkey_patch + monkey_patch() + + mongoconn = MongoClient(**conf) + database = mongoconn[dbname] + + version = mongoconn.server_info()['version'] + if tuple(map(int, version.split('.')[:2])) < (1, 3): + raise NotImplementedError( + 'Kombu requires MongoDB version 1.3+ (server is {0})'.format( + version)) + + self._create_broadcast(database, options) + + self._client = database + + def _create_broadcast(self, database, options): + '''Create capped collection for broadcast messages.''' + if DEFAULT_BROADCAST_COLLECTION in database.collection_names(): + return + + capsize = options.get('capped_queue_size') or 100000 + database.create_collection(DEFAULT_BROADCAST_COLLECTION, + size=capsize, capped=True) + + def _ensure_indexes(self): + '''Ensure indexes on collections.''' + self.get_messages().ensure_index( + [('queue', 1), ('_id', 1)], background=True, + ) + self.get_broadcast().ensure_index([('queue', 1)]) + self.get_routing().ensure_index([('queue', 1), ('exchange', 1)]) + + # TODO Store a more complete exchange metatable in the routing collection + def get_table(self, exchange): + """Get table of bindings for ``exchange``.""" + localRoutes = frozenset(self.state.exchanges[exchange]['table']) + brokerRoutes = self.get_messages().routing.find( + {'exchange': exchange} + ) + + return localRoutes | frozenset((r['routing_key'], + r['pattern'], + r['queue']) for r in brokerRoutes) + + def _put_fanout(self, exchange, message, routing_key, **kwargs): + """Deliver fanout message.""" + self.get_broadcast().insert({'payload': dumps(message), + 'queue': exchange}) + + def _queue_bind(self, exchange, routing_key, pattern, queue): + if self.typeof(exchange).type == 'fanout': + self.create_broadcast_cursor(exchange, routing_key, pattern, queue) + self._fanout_queues[queue] = exchange + + meta = {'exchange': exchange, + 'queue': queue, + 'routing_key': routing_key, + 'pattern': pattern} + self.get_routing().update(meta, meta, upsert=True) + + def queue_delete(self, queue, **kwargs): + self.get_routing().remove({'queue': queue}) + + super(Channel, self).queue_delete(queue, **kwargs) + + if queue in self._fanout_queues: + try: + cursor = self._broadcast_cursors.pop(queue) + except KeyError: + pass + else: + cursor.close() + + self._fanout_queues.pop(queue) + + def _create_client(self): + self._open() + self._ensure_indexes() + + @property + def client(self): + if self._client is None: + self._create_client() + return self._client + + def get_messages(self): + return self.client[DEFAULT_MESSAGES_COLLECTION] + + def get_routing(self): + return self.client[DEFAULT_ROUTING_COLLECTION] + + def get_broadcast(self): + return self.client[DEFAULT_BROADCAST_COLLECTION] + + def get_broadcast_cursor(self, queue): + try: + return self._broadcast_cursors[queue] + except KeyError: + # Cursor may be absent when Channel created more than once. + # _fanout_queues is a class-level mutable attribute so it's + # shared over all Channel instances. 
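+            # routing_key and pattern are unused by create_broadcast_cursor(),
+            # so None is passed for both when recreating the cursor here.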
+ return self.create_broadcast_cursor( + self._fanout_queues[queue], None, None, queue, + ) + + def create_broadcast_cursor(self, exchange, routing_key, pattern, queue): + if pymongo.version_tuple >= (3, ): + query = dict(filter={'queue': exchange}, + sort=[('$natural', 1)], + cursor_type=CursorType.TAILABLE + ) + else: + query = dict(query={'queue': exchange}, + sort=[('$natural', 1)], + tailable=True + ) + + cursor = self.get_broadcast().find(**query) + ret = self._broadcast_cursors[queue] = BroadcastCursor(cursor) + return ret + + +class Transport(virtual.Transport): + Channel = Channel + + can_parse_url = True + polling_interval = 1 + default_port = DEFAULT_PORT + connection_errors = ( + virtual.Transport.connection_errors + (errors.ConnectionFailure, ) + ) + channel_errors = ( + virtual.Transport.channel_errors + ( + errors.ConnectionFailure, + errors.OperationFailure) + ) + driver_type = 'mongodb' + driver_name = 'pymongo' + + def driver_version(self): + return pymongo.version diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/pyamqp.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/pyamqp.py new file mode 100644 index 0000000..7111a39 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/pyamqp.py @@ -0,0 +1,155 @@ +""" +kombu.transport.pyamqp +====================== + +pure python amqp transport. + +""" +from __future__ import absolute_import + +import amqp + +from kombu.five import items +from kombu.utils.amq_manager import get_manager +from kombu.utils.text import version_string_as_tuple + +from . import base + +DEFAULT_PORT = 5672 +DEFAULT_SSL_PORT = 5671 + + +class Message(base.Message): + + def __init__(self, channel, msg, **kwargs): + props = msg.properties + super(Message, self).__init__( + channel, + body=msg.body, + delivery_tag=msg.delivery_tag, + content_type=props.get('content_type'), + content_encoding=props.get('content_encoding'), + delivery_info=msg.delivery_info, + properties=msg.properties, + headers=props.get('application_headers') or {}, + **kwargs) + + +class Channel(amqp.Channel, base.StdChannel): + Message = Message + + def prepare_message(self, body, priority=None, + content_type=None, content_encoding=None, + headers=None, properties=None, _Message=amqp.Message): + """Prepares message so that it can be sent using this transport.""" + return _Message( + body, + priority=priority, + content_type=content_type, + content_encoding=content_encoding, + application_headers=headers, + **properties or {} + ) + + def message_to_python(self, raw_message): + """Convert encoded message body back to a Python value.""" + return self.Message(self, raw_message) + + +class Connection(amqp.Connection): + Channel = Channel + + +class Transport(base.Transport): + Connection = Connection + + default_port = DEFAULT_PORT + default_ssl_port = DEFAULT_SSL_PORT + + # it's very annoying that pyamqp sometimes raises AttributeError + # if the connection is lost, but nothing we can do about that here. 
+ connection_errors = amqp.Connection.connection_errors + channel_errors = amqp.Connection.channel_errors + recoverable_connection_errors = \ + amqp.Connection.recoverable_connection_errors + recoverable_channel_errors = amqp.Connection.recoverable_channel_errors + + driver_name = 'py-amqp' + driver_type = 'amqp' + supports_heartbeats = True + supports_ev = True + + def __init__(self, client, + default_port=None, default_ssl_port=None, **kwargs): + self.client = client + self.default_port = default_port or self.default_port + self.default_ssl_port = default_ssl_port or self.default_ssl_port + + def driver_version(self): + return amqp.__version__ + + def create_channel(self, connection): + return connection.channel() + + def drain_events(self, connection, **kwargs): + return connection.drain_events(**kwargs) + + def establish_connection(self): + """Establish connection to the AMQP broker.""" + conninfo = self.client + for name, default_value in items(self.default_connection_params): + if not getattr(conninfo, name, None): + setattr(conninfo, name, default_value) + if conninfo.hostname == 'localhost': + conninfo.hostname = '127.0.0.1' + opts = dict({ + 'host': conninfo.host, + 'userid': conninfo.userid, + 'password': conninfo.password, + 'login_method': conninfo.login_method, + 'virtual_host': conninfo.virtual_host, + 'insist': conninfo.insist, + 'ssl': conninfo.ssl, + 'connect_timeout': conninfo.connect_timeout, + 'heartbeat': conninfo.heartbeat, + }, **conninfo.transport_options or {}) + conn = self.Connection(**opts) + conn.client = self.client + return conn + + def verify_connection(self, connection): + return connection.connected + + def close_connection(self, connection): + """Close the AMQP broker connection.""" + connection.client = None + connection.close() + + def get_heartbeat_interval(self, connection): + return connection.heartbeat + + def register_with_event_loop(self, connection, loop): + loop.add_reader(connection.sock, self.on_readable, connection, loop) + + def heartbeat_check(self, connection, rate=2): + return connection.heartbeat_tick(rate=rate) + + def qos_semantics_matches_spec(self, connection): + props = connection.server_properties + if props.get('product') == 'RabbitMQ': + return version_string_as_tuple(props['version']) < (3, 3) + return True + + @property + def default_connection_params(self): + return { + 'userid': 'guest', + 'password': 'guest', + 'port': (self.default_ssl_port if self.client.ssl + else self.default_port), + 'hostname': 'localhost', + 'login_method': 'AMQPLAIN', + } + + def get_manager(self, *args, **kwargs): + return get_manager(self.client, *args, **kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/pyro.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/pyro.py new file mode 100644 index 0000000..b87a5fb --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/pyro.py @@ -0,0 +1,99 @@ +""" +kombu.transport.pyro +====================== + +Pyro transport. + +Requires the :mod:`Pyro4` library to be installed. + +""" +from __future__ import absolute_import + +import sys + +from kombu.five import reraise +from kombu.utils import cached_property + +from . 
import virtual + +try: + import Pyro4 as pyro + from Pyro4.errors import NamingError +except ImportError: # pragma: no cover + pyro = NamingError = None # noqa + +DEFAULT_PORT = 9090 +E_LOOKUP = """\ +Unable to locate pyro nameserver {0.virtual_host} on host {0.hostname}\ +""" + + +class Channel(virtual.Channel): + + def queues(self): + return self.shared_queues.get_queue_names() + + def _new_queue(self, queue, **kwargs): + if queue not in self.queues(): + self.shared_queues.new_queue(queue) + + def _get(self, queue, timeout=None): + queue = self._queue_for(queue) + msg = self.shared_queues._get(queue) + return msg + + def _queue_for(self, queue): + if queue not in self.queues(): + self.shared_queues.new_queue(queue) + return queue + + def _put(self, queue, message, **kwargs): + queue = self._queue_for(queue) + self.shared_queues._put(queue, message) + + def _size(self, queue): + return self.shared_queues._size(queue) + + def _delete(self, queue, *args): + self.shared_queues._delete(queue) + + def _purge(self, queue): + return self.shared_queues._purge(queue) + + def after_reply_message_received(self, queue): + pass + + @cached_property + def shared_queues(self): + return self.connection.shared_queues + + +class Transport(virtual.Transport): + Channel = Channel + + #: memory backend state is global. + state = virtual.BrokerState() + + default_port = DEFAULT_PORT + + driver_type = driver_name = 'pyro' + + def _open(self): + conninfo = self.client + pyro.config.HMAC_KEY = conninfo.virtual_host + try: + nameserver = pyro.locateNS(host=conninfo.hostname, + port=self.default_port) + # name of registered pyro object + uri = nameserver.lookup(conninfo.virtual_host) + return pyro.Proxy(uri) + except NamingError: + reraise(NamingError, NamingError(E_LOOKUP.format(conninfo)), + sys.exc_info()[2]) + + def driver_version(self): + return pyro.__version__ + + @cached_property + def shared_queues(self): + return self._open() diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/qpid.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/qpid.py new file mode 100644 index 0000000..242f6a1 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/qpid.py @@ -0,0 +1,1740 @@ +""" +kombu.transport.qpid +======================= + +`Qpid`_ transport using `qpid-python`_ as the client and `qpid-tools`_ for +broker management. + +The use this transport you must install the necessary dependencies. These +dependencies are available via PyPI and can be installed using the pip +command: + +.. code-block:: console + + $ pip install kombu[qpid] + +or to install the requirements manually: + +.. code-block:: console + + $ pip install qpid-tools qpid-python + +.. admonition:: Python 3 and PyPy Limitations + + The Qpid transport does not support Python 3 or PyPy environments due + to underlying dependencies not being compatible. This version is + tested and works with with Python 2.7. + +.. _`Qpid`: http://qpid.apache.org/ +.. _`qpid-python`: http://pypi.python.org/pypi/qpid-python/ +.. _`qpid-tools`: http://pypi.python.org/pypi/qpid-tools/ + +Authentication +============== + +This transport supports SASL authentication with the Qpid broker. Normally, +SASL mechanisms are negotiated from a client list and a server list of +possible mechanisms, but in practice, different SASL client libraries give +different behaviors. These different behaviors cause the expected SASL +mechanism to not be selected in many cases. 
As such, this transport restricts +the mechanism types based on Kombu's configuration according to the following +table. + ++------------------------------------+--------------------+ +| **Broker String** | **SASL Mechanism** | ++------------------------------------+--------------------+ +| qpid://hostname/ | ANONYMOUS | ++------------------------------------+--------------------+ +| qpid://username:password@hostname/ | PLAIN | ++------------------------------------+--------------------+ +| see instructions below | EXTERNAL | ++------------------------------------+--------------------+ + +The user can override the above SASL selection behaviors and specify the SASL +string using the :attr:`~kombu.Connection.login_method` argument to the +:class:`~kombu.Connection` object. The string can be a single SASL mechanism +or a space separated list of SASL mechanisms. If you are using Celery with +Kombu, this can be accomplished by setting the *BROKER_LOGIN_METHOD* Celery +option. + +.. note:: + + While using SSL, Qpid users may want to override the SASL mechanism to + use *EXTERNAL*. In that case, Qpid requires a username to be presented + that matches the *CN* of the SSL client certificate. Ensure that the + broker string contains the corresponding username. For example, if the + client certificate has *CN=asdf* and the client connects to *example.com* + on port 5671, the broker string should be: + + **qpid://asdf@example.com:5671/** + +Transport Options +================= + +The :attr:`~kombu.Connection.transport_options` argument to the +:class:`~kombu.Connection` object are passed directly to the +:class:`qpid.messaging.endpoints.Connection` as keyword arguments. These +options override and replace any other default or specified values. If using +Celery, this can be accomplished by setting the +*BROKER_TRANSPORT_OPTIONS* Celery option. + +""" +from __future__ import absolute_import + +import os +import select +import socket +import ssl +import sys +import time +import uuid + +from gettext import gettext as _ + +import amqp.protocol + +try: + import fcntl +except ImportError: + fcntl = None # noqa + +try: + import qpidtoollibs +except ImportError: # pragma: no cover + qpidtoollibs = None # noqa + +try: + from qpid.messaging.exceptions import ConnectionError, NotFound + from qpid.messaging.exceptions import Empty as QpidEmpty + from qpid.messaging.exceptions import SessionClosed +except ImportError: # pragma: no cover + ConnectionError = None + NotFound = None + QpidEmpty = None + SessionClosed = None + +try: + import qpid +except ImportError: # pragma: no cover + qpid = None + + +from kombu.five import Empty, items +from kombu.log import get_logger +from kombu.transport.virtual import Base64, Message +from kombu.transport import base +from kombu.utils.compat import OrderedDict + + +logger = get_logger(__name__) + + +OBJECT_ALREADY_EXISTS_STRING = 'object already exists' + +VERSION = (1, 0, 0) +__version__ = '.'.join(map(str, VERSION)) + +PY3 = sys.version_info[0] == 3 + + +def dependency_is_none(dependency): + """Return True if the dependency is None, otherwise False. This is done + using a function so that tests can mock this behavior easily. + + :param dependency: The module to check if it is None + :return: True if dependency is None otherwise False. + + """ + return dependency is None + + +class AuthenticationFailure(Exception): + pass + + +class QoS(object): + """A helper object for message prefetch and ACKing purposes. + + :keyword prefetch_count: Initial prefetch count, hard set to 1. 
+ :type prefetch_count: int + + + NOTE: prefetch_count is currently hard set to 1, and needs to be improved + + This object is instantiated 1-for-1 with a + :class:`~.kombu.transport.qpid.Channel` instance. QoS allows + ``prefetch_count`` to be set to the number of outstanding messages + the corresponding :class:`~kombu.transport.qpid.Channel` should be + allowed to prefetch. Setting ``prefetch_count`` to 0 disables + prefetch limits, and the object can hold an arbitrary number of messages. + + Messages are added using :meth:`append`, which are held until they are + ACKed asynchronously through a call to :meth:`ack`. Messages that are + received, but not ACKed will not be delivered by the broker to another + consumer until an ACK is received, or the session is closed. Messages + are referred to using delivery_tag, which are unique per + :class:`Channel`. Delivery tags are managed outside of this object and + are passed in with a message to :meth:`append`. Un-ACKed messages can + be looked up from QoS using :meth:`get` and can be rejected and + forgotten using :meth:`reject`. + + """ + + def __init__(self, session, prefetch_count=1): + self.session = session + self.prefetch_count = 1 + self._not_yet_acked = OrderedDict() + + def can_consume(self): + """Return True if the :class:`~kombu.transport.qpid.Channel` can + consume more messages, else False. + + Used to ensure the client adheres to currently active prefetch + limits. + + :returns: True, if this QoS object can accept more messages + without violating the prefetch_count. If prefetch_count is 0, + can_consume will always return True. + :rtype: bool + + """ + return ( + not self.prefetch_count or + len(self._not_yet_acked) < self.prefetch_count + ) + + def can_consume_max_estimate(self): + """Return the remaining message capacity for the associated + :class:`kombu.transport.qpid.Channel`. + + Returns an estimated number of outstanding messages that a + :class:`kombu.transport.qpid.Channel` can accept without + exceeding ``prefetch_count``. If ``prefetch_count`` is 0, then + this method returns 1. + + :returns: The number of estimated messages that can be fetched + without violating the prefetch_count. + :rtype: int + + """ + return 1 if not self.prefetch_count else ( + self.prefetch_count - len(self._not_yet_acked) + ) + + def append(self, message, delivery_tag): + """Append message to the list of un-ACKed messages. + + Add a message, referenced by the delivery_tag, for ACKing, + rejecting, or getting later. Messages are saved into an + :class:`collections.OrderedDict` by delivery_tag. + + :param message: A received message that has not yet been ACKed. + :type message: qpid.messaging.Message + :param delivery_tag: A UUID to refer to this message by + upon receipt. + :type delivery_tag: uuid.UUID + + """ + self._not_yet_acked[delivery_tag] = message + + def get(self, delivery_tag): + """Get an un-ACKed message by delivery_tag. If called with an invalid + delivery_tag a :exc:`KeyError` is raised. + + :param delivery_tag: The delivery tag associated with the message + to be returned. + :type delivery_tag: uuid.UUID + + :return: An un-ACKed message that is looked up by delivery_tag. + :rtype: qpid.messaging.Message + + """ + return self._not_yet_acked[delivery_tag] + + def ack(self, delivery_tag): + """Acknowledge a message by delivery_tag. + + Called asynchronously once the message has been handled and can be + forgotten by the broker. + + :param delivery_tag: the delivery tag associated with the message + to be acknowledged. 
+ :type delivery_tag: uuid.UUID + + """ + message = self._not_yet_acked.pop(delivery_tag) + self.session.acknowledge(message=message) + + def reject(self, delivery_tag, requeue=False): + """Reject a message by delivery_tag. + + Explicitly notify the broker that the channel associated + with this QoS object is rejecting the message that was previously + delivered. + + If requeue is False, then the message is not requeued for delivery + to another consumer. If requeue is True, then the message is + requeued for delivery to another consumer. + + :param delivery_tag: The delivery tag associated with the message + to be rejected. + :type delivery_tag: uuid.UUID + :keyword requeue: If True, the broker will be notified to requeue + the message. If False, the broker will be told to drop the + message entirely. In both cases, the message will be removed + from this object. + :type requeue: bool + + """ + message = self._not_yet_acked.pop(delivery_tag) + QpidDisposition = qpid.messaging.Disposition + if requeue: + disposition = QpidDisposition(qpid.messaging.RELEASED) + else: + disposition = QpidDisposition(qpid.messaging.REJECTED) + self.session.acknowledge(message=message, disposition=disposition) + + +class Channel(base.StdChannel): + """Supports broker configuration and messaging send and receive. + + :param connection: A Connection object that this Channel can + reference. Currently only used to access callbacks. + :type connection: kombu.transport.qpid.Connection + :param transport: The Transport this Channel is associated with. + :type transport: kombu.transport.qpid.Transport + + A channel object is designed to have method-parity with a Channel as + defined in AMQP 0-10 and earlier, which allows for the following broker + actions: + + - exchange declare and delete + - queue declare and delete + - queue bind and unbind operations + - queue length and purge operations + - sending/receiving/rejecting messages + - structuring, encoding, and decoding messages + - supports synchronous and asynchronous reads + - reading state about the exchange, queues, and bindings + + Channels are designed to all share a single TCP connection with a + broker, but provide a level of isolated communication with the broker + while benefiting from a shared TCP connection. The Channel is given + its :class:`~kombu.transport.qpid.Connection` object by the + :class:`~kombu.transport.qpid.Transport` that + instantiates the channel. + + This channel inherits from :class:`~kombu.transport.base.StdChannel`, + which makes this a 'native' channel versus a 'virtual' channel which + would inherit from :class:`kombu.transports.virtual`. + + Messages sent using this channel are assigned a delivery_tag. The + delivery_tag is generated for a message as they are prepared for + sending by :meth:`basic_publish`. The delivery_tag is unique per + channel instance. The delivery_tag has no meaningful context in other + objects, and is only maintained in the memory of this object, and the + underlying :class:`QoS` object that provides support. + + Each channel object instantiates exactly one :class:`QoS` object for + prefetch limiting, and asynchronous ACKing. The :class:`QoS` object is + lazily instantiated through a property method :meth:`qos`. The + :class:`QoS` object is a supporting object that should not be accessed + directly except by the channel itself. + + Synchronous reads on a queue are done using a call to :meth:`basic_get` + which uses :meth:`_get` to perform the reading. 
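A hedged sketch of this synchronous read path, assuming a running Qpid broker under Python 2 and an existing queue named 'tasks' (broker address, credentials, and queue name are assumptions):

.. code-block:: python

    from kombu import Connection

    with Connection('qpid://guest:guest@localhost:5672//') as conn:
        channel = conn.channel()
        # basic_get() reads a single message (or returns None) and ACKs it
        # immediately; no_ack has no effect on this transport.
        message = channel.basic_get('tasks')
        if message is not None:
            print(message.payload)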
These methods read + immediately and do not accept any form of timeout. :meth:`basic_get` + reads synchronously and ACKs messages before returning them. ACKing is + done in all cases, because an application that reads messages using + qpid.messaging, but does not ACK them will experience a memory leak. + The no_ack argument to :meth:`basic_get` does not affect ACKing + functionality. + + Asynchronous reads on a queue are done by starting a consumer using + :meth:`basic_consume`. Each call to :meth:`basic_consume` will cause a + :class:`~qpid.messaging.endpoints.Receiver` to be created on the + :class:`~qpid.messaging.endpoints.Session` started by the :class: + `Transport`. The receiver will asynchronously read using + qpid.messaging, and prefetch messages before the call to + :meth:`Transport.basic_drain` occurs. The prefetch_count value of the + :class:`QoS` object is the capacity value of the new receiver. The new + receiver capacity must always be at least 1, otherwise none of the + receivers will appear to be ready for reading, and will never be read + from. + + Each call to :meth:`basic_consume` creates a consumer, which is given a + consumer tag that is identified by the caller of :meth:`basic_consume`. + Already started consumers can be cancelled using by their consumer_tag + using :meth:`basic_cancel`. Cancellation of a consumer causes the + :class:`~qpid.messaging.endpoints.Receiver` object to be closed. + + Asynchronous message ACKing is supported through :meth:`basic_ack`, + and is referenced by delivery_tag. The Channel object uses its + :class:`QoS` object to perform the message ACKing. + + """ + + #: A class reference that will be instantiated using the qos property. + QoS = QoS + + #: A class reference that identifies + # :class:`~kombu.transport.virtual.Message` as the message class type + Message = Message + + #: Default body encoding. + #: NOTE: ``transport_options['body_encoding']`` will override this value. + body_encoding = 'base64' + + #: Binary <-> ASCII codecs. + codecs = {'base64': Base64()} + + def __init__(self, connection, transport): + self.connection = connection + self.transport = transport + qpid_connection = connection.get_qpid_connection() + self._broker = qpidtoollibs.BrokerAgent(qpid_connection) + self.closed = False + self._tag_to_queue = {} + self._receivers = {} + self._qos = None + + def _get(self, queue): + """Non-blocking, single-message read from a queue. + + An internal method to perform a non-blocking, single-message read + from a queue by name. This method creates a + :class:`~qpid.messaging.endpoints.Receiver` to read from the queue + using the :class:`~qpid.messaging.endpoints.Session` saved on the + associated :class:`~kombu.transport.qpid.Transport`. The receiver + is closed before the method exits. If a message is available, a + :class:`qpid.messaging.Message` object is returned. If no message is + available, a :class:`qpid.messaging.exceptions.Empty` exception is + raised. + + This is an internal method. External calls for get functionality + should be done using :meth:`basic_get`. + + :param queue: The queue name to get the message from + :type queue: str + + :return: The received message. + :rtype: :class:`qpid.messaging.Message` + :raises: :class:`qpid.messaging.exceptions.Empty` if no + message is available. 
+ + """ + rx = self.transport.session.receiver(queue) + try: + message = rx.fetch(timeout=0) + finally: + rx.close() + return message + + def _put(self, routing_key, message, exchange=None, **kwargs): + """Synchronous send of a single message onto a queue or exchange. + + An internal method which synchronously sends a single message onto + a given queue or exchange. If exchange is not specified, + the message is sent directly to a queue specified by routing_key. + If no queue is found by the name of routing_key while exchange is + not specified an exception is raised. If an exchange is specified, + then the message is delivered onto the requested + exchange using routing_key. Message sending is synchronous using + sync=True because large messages in kombu funtests were not being + fully sent before the receiver closed. + + This method creates a :class:`qpid.messaging.endpoints.Sender` to + send the message to the queue using the + :class:`qpid.messaging.endpoints.Session` created and referenced by + the associated :class:`~kombu.transport.qpid.Transport`. The sender + is closed before the method exits. + + External calls for put functionality should be done using + :meth:`basic_publish`. + + :param routing_key: If exchange is None, treated as the queue name + to send the message to. If exchange is not None, treated as the + routing_key to use as the message is submitted onto the exchange. + :type routing_key: str + :param message: The message to be sent as prepared by + :meth:`basic_publish`. + :type message: dict + :keyword exchange: keyword parameter of the exchange this message + should be sent on. If no exchange is specified, the message is + sent directly to a queue specified by routing_key. + :type exchange: str + + """ + if not exchange: + address = '%s; {assert: always, node: {type: queue}}' % ( + routing_key,) + msg_subject = None + else: + address = '%s/%s; {assert: always, node: {type: topic}}' % ( + exchange, routing_key) + msg_subject = str(routing_key) + sender = self.transport.session.sender(address) + qpid_message = qpid.messaging.Message(content=message, + subject=msg_subject) + try: + sender.send(qpid_message, sync=True) + finally: + sender.close() + + def _purge(self, queue): + """Purge all undelivered messages from a queue specified by name. + + An internal method to purge all undelivered messages from a queue + specified by name. If the queue does not exist a + :class:`qpid.messaging.exceptions.NotFound` exception is raised. + + The queue message depth is first checked, and then the broker is + asked to purge that number of messages. The integer number of + messages requested to be purged is returned. The actual number of + messages purged may be different than the requested number of + messages to purge (see below). + + Sometimes delivered messages are asked to be purged, but are not. + This case fails silently, which is the correct behavior when a + message that has been delivered to a different consumer, who has + not ACKed the message, and still has an active session with the + broker. Messages in that case are not safe for purging and will be + retained by the broker. The client is unable to change this + delivery behavior. + + This is an internal method. External calls for purge functionality + should be done using :meth:`queue_purge`. + + :param queue: the name of the queue to be purged + :type queue: str + + :return: The number of messages requested to be purged. 
+ :rtype: int + + :raises: :class:`qpid.messaging.exceptions.NotFound` if the queue + being purged cannot be found. + + """ + queue_to_purge = self._broker.getQueue(queue) + if queue_to_purge is None: + error_text = "NOT_FOUND - no queue '{0}'".format(queue) + raise NotFound(code=404, text=error_text) + message_count = queue_to_purge.values['msgDepth'] + if message_count > 0: + queue_to_purge.purge(message_count) + return message_count + + def _size(self, queue): + """Get the number of messages in a queue specified by name. + + An internal method to return the number of messages in a queue + specified by name. It returns an integer count of the number + of messages currently in the queue. + + :param queue: The name of the queue to be inspected for the number + of messages + :type queue: str + + :return the number of messages in the queue specified by name. + :rtype: int + + """ + queue_to_check = self._broker.getQueue(queue) + message_depth = queue_to_check.values['msgDepth'] + return message_depth + + def _delete(self, queue, *args, **kwargs): + """Delete a queue and all messages on that queue. + + An internal method to delete a queue specified by name and all the + messages on it. First, all messages are purged from a queue using a + call to :meth:`_purge`. Second, the broker is asked to delete the + queue. + + This is an internal method. External calls for queue delete + functionality should be done using :meth:`queue_delete`. + + :param queue: The name of the queue to be deleted. + :type queue: str + + """ + self._purge(queue) + self._broker.delQueue(queue) + + def _has_queue(self, queue, **kwargs): + """Determine if the broker has a queue specified by name. + + :param queue: The queue name to check if the queue exists. + :type queue: str + + :return: True if a queue exists on the broker, and false + otherwise. + :rtype: bool + + """ + if self._broker.getQueue(queue): + return True + else: + return False + + def queue_declare(self, queue, passive=False, durable=False, + exclusive=False, auto_delete=True, nowait=False, + arguments=None): + """Create a new queue specified by name. + + If the queue already exists, no change is made to the queue, + and the return value returns information about the existing queue. + + The queue name is required and specified as the first argument. + + If passive is True, the server will not create the queue. The + client can use this to check whether a queue exists without + modifying the server state. Default is False. + + If durable is True, the queue will be durable. Durable queues + remain active when a server restarts. Non-durable queues ( + transient queues) are purged if/when a server restarts. Note that + durable queues do not necessarily hold persistent messages, + although it does not make sense to send persistent messages to a + transient queue. Default is False. + + If exclusive is True, the queue will be exclusive. Exclusive queues + may only be consumed by the current connection. Setting the + 'exclusive' flag always implies 'auto-delete'. Default is False. + + If auto_delete is True, the queue is deleted when all consumers + have finished using it. The last consumer can be cancelled either + explicitly or because its channel is closed. If there was no + consumer ever on the queue, it won't be deleted. Default is True. + + The nowait parameter is unused. It was part of the 0-9-1 protocol, + but this AMQP client implements 0-10 which removed the nowait option. 
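As an illustration of the return value described here, a hedged sketch (broker address and queue name are assumptions):

.. code-block:: python

    from kombu import Connection

    with Connection('qpid://guest:guest@localhost:5672//') as conn:
        channel = conn.channel()
        # Re-declaring an existing queue is harmless: broker state is left
        # unchanged and the current counts are reported back as a namedtuple.
        ok = channel.queue_declare('thesis.tasks', durable=True, auto_delete=False)
        print(ok.queue, ok.message_count, ok.consumer_count)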
+ + The arguments parameter is a set of arguments for the declaration of + the queue. Arguments are passed as a dict or None. This field is + ignored if passive is True. Default is None. + + This method returns a :class:`~collections.namedtuple` with the name + 'queue_declare_ok_t' and the queue name as 'queue', message count + on the queue as 'message_count', and the number of active consumers + as 'consumer_count'. The named tuple values are ordered as queue, + message_count, and consumer_count respectively. + + Due to Celery's non-ACKing of events, a ring policy is set on any + queue that starts with the string 'celeryev' or ends with the string + 'pidbox'. These are celery event queues, and Celery does not ack + them, causing the messages to build-up. Eventually Qpid stops serving + messages unless the 'ring' policy is set, at which point the buffer + backing the queue becomes circular. + + :param queue: The name of the queue to be created. + :type queue: str + :param passive: If True, the sever will not create the queue. + :type passive: bool + :param durable: If True, the queue will be durable. + :type durable: bool + :param exclusive: If True, the queue will be exclusive. + :type exclusive: bool + :param auto_delete: If True, the queue is deleted when all + consumers have finished using it. + :type auto_delete: bool + :param nowait: This parameter is unused since the 0-10 + specification does not include it. + :type nowait: bool + :param arguments: A set of arguments for the declaration of the + queue. + :type arguments: dict or None + + :return: A named tuple representing the declared queue as a named + tuple. The tuple values are ordered as queue, message count, + and the active consumer count. + :rtype: :class:`~collections.namedtuple` + + """ + options = {'passive': passive, + 'durable': durable, + 'exclusive': exclusive, + 'auto-delete': auto_delete, + 'arguments': arguments} + if queue.startswith('celeryev') or queue.endswith('pidbox'): + options['qpid.policy_type'] = 'ring' + try: + self._broker.addQueue(queue, options=options) + except Exception as exc: + if OBJECT_ALREADY_EXISTS_STRING not in str(exc): + raise exc + queue_to_check = self._broker.getQueue(queue) + message_count = queue_to_check.values['msgDepth'] + consumer_count = queue_to_check.values['consumerCount'] + return amqp.protocol.queue_declare_ok_t(queue, message_count, + consumer_count) + + def queue_delete(self, queue, if_unused=False, if_empty=False, **kwargs): + """Delete a queue by name. + + Delete a queue specified by name. Using the if_unused keyword + argument, the delete can only occur if there are 0 consumers bound + to it. Using the if_empty keyword argument, the delete can only + occur if there are 0 messages in the queue. + + :param queue: The name of the queue to be deleted. + :type queue: str + :keyword if_unused: If True, delete only if the queue has 0 + consumers. If False, delete a queue even with consumers bound + to it. + :type if_unused: bool + :keyword if_empty: If True, only delete the queue if it is empty. If + False, delete the queue if it is empty or not. + :type if_empty: bool + + """ + if self._has_queue(queue): + if if_empty and self._size(queue): + return + queue_obj = self._broker.getQueue(queue) + consumer_count = queue_obj.getAttributes()['consumerCount'] + if if_unused and consumer_count > 0: + return + self._delete(queue) + + def exchange_declare(self, exchange='', type='direct', durable=False, + **kwargs): + """Create a new exchange. 
+ + Create an exchange of a specific type, and optionally have the + exchange be durable. If an exchange of the requested name already + exists, no action is taken and no exceptions are raised. Durable + exchanges will survive a broker restart, non-durable exchanges will + not. + + Exchanges provide behaviors based on their type. The expected + behaviors are those defined in the AMQP 0-10 and prior + specifications including 'direct', 'topic', and 'fanout' + functionality. + + :keyword type: The exchange type. Valid values include 'direct', + 'topic', and 'fanout'. + :type type: str + :keyword exchange: The name of the exchange to be created. If no + exchange is specified, then a blank string will be used as the + name. + :type exchange: str + :keyword durable: True if the exchange should be durable, or False + otherwise. + :type durable: bool + + """ + options = {'durable': durable} + try: + self._broker.addExchange(type, exchange, options) + except Exception as exc: + if OBJECT_ALREADY_EXISTS_STRING not in str(exc): + raise exc + + def exchange_delete(self, exchange_name, **kwargs): + """Delete an exchange specified by name + + :param exchange_name: The name of the exchange to be deleted. + :type exchange_name: str + + """ + self._broker.delExchange(exchange_name) + + def queue_bind(self, queue, exchange, routing_key, **kwargs): + """Bind a queue to an exchange with a bind key. + + Bind a queue specified by name, to an exchange specified by name, + with a specific bind key. The queue and exchange must already + exist on the broker for the bind to complete successfully. Queues + may be bound to exchanges multiple times with different keys. + + :param queue: The name of the queue to be bound. + :type queue: str + :param exchange: The name of the exchange that the queue should be + bound to. + :type exchange: str + :param routing_key: The bind key that the specified queue should + bind to the specified exchange with. + :type routing_key: str + + """ + self._broker.bind(exchange, queue, routing_key) + + def queue_unbind(self, queue, exchange, routing_key, **kwargs): + """Unbind a queue from an exchange with a given bind key. + + Unbind a queue specified by name, from an exchange specified by + name, that is already bound with a bind key. The queue and + exchange must already exist on the broker, and bound with the bind + key for the operation to complete successfully. Queues may be + bound to exchanges multiple times with different keys, thus the + bind key is a required field to unbind in an explicit way. + + :param queue: The name of the queue to be unbound. + :type queue: str + :param exchange: The name of the exchange that the queue should be + unbound from. + :type exchange: str + :param routing_key: The existing bind key between the specified + queue and a specified exchange that should be unbound. + :type routing_key: str + + """ + self._broker.unbind(exchange, queue, routing_key) + + def queue_purge(self, queue, **kwargs): + """Remove all undelivered messages from queue. + + Purge all undelivered messages from a queue specified by name. If the + queue does not exist an exception is raised. The queue message + depth is first checked, and then the broker is asked to purge that + number of messages. The integer number of messages requested to be + purged is returned. The actual number of messages purged may be + different than the requested number of messages to purge. + + Sometimes delivered messages are asked to be purged, but are not. 
+ This case fails silently, which is the correct behavior when a + message that has been delivered to a different consumer, who has + not ACKed the message, and still has an active session with the + broker. Messages in that case are not safe for purging and will be + retained by the broker. The client is unable to change this + delivery behavior. + + Internally, this method relies on :meth:`_purge`. + + :param queue: The name of the queue which should have all messages + removed. + :type queue: str + + :return: The number of messages requested to be purged. + :rtype: int + + :raises: :class:`qpid.messaging.exceptions.NotFound` if the queue + being purged cannot be found. + + """ + return self._purge(queue) + + def basic_get(self, queue, no_ack=False, **kwargs): + """Non-blocking single message get and ACK from a queue by name. + + Internally this method uses :meth:`_get` to fetch the message. If + an :class:`~qpid.messaging.exceptions.Empty` exception is raised by + :meth:`_get`, this method silences it and returns None. If + :meth:`_get` does return a message, that message is ACKed. The no_ack + parameter has no effect on ACKing behavior, and all messages are + ACKed in all cases. This method never adds fetched Messages to the + internal QoS object for asynchronous ACKing. + + This method converts the object type of the method as it passes + through. Fetching from the broker, :meth:`_get` returns a + :class:`qpid.messaging.Message`, but this method takes the payload + of the :class:`qpid.messaging.Message` and instantiates a + :class:`~kombu.transport.virtual.Message` object with the payload + based on the class setting of self.Message. + + :param queue: The queue name to fetch a message from. + :type queue: str + :keyword no_ack: The no_ack parameter has no effect on the ACK + behavior of this method. Un-ACKed messages create a memory leak in + qpid.messaging, and need to be ACKed in all cases. + :type noack: bool + + :return: The received message. + :rtype: :class:`~kombu.transport.virtual.Message` + + """ + try: + qpid_message = self._get(queue) + raw_message = qpid_message.content + message = self.Message(self, raw_message) + self.transport.session.acknowledge(message=qpid_message) + return message + except Empty: + pass + + def basic_ack(self, delivery_tag): + """Acknowledge a message by delivery_tag. + + Acknowledges a message referenced by delivery_tag. Messages can + only be ACKed using :meth:`basic_ack` if they were acquired using + :meth:`basic_consume`. This is the ACKing portion of the + asynchronous read behavior. + + Internally, this method uses the :class:`QoS` object, which stores + messages and is responsible for the ACKing. + + :param delivery_tag: The delivery tag associated with the message + to be acknowledged. + :type delivery_tag: uuid.UUID + + """ + self.qos.ack(delivery_tag) + + def basic_reject(self, delivery_tag, requeue=False): + """Reject a message by delivery_tag. + + Rejects a message that has been received by the Channel, but not + yet acknowledged. Messages are referenced by their delivery_tag. + + If requeue is False, the rejected message will be dropped by the + broker and not delivered to any other consumers. If requeue is + True, then the rejected message will be requeued for delivery to + another consumer, potentially to the same consumer who rejected the + message previously. + + :param delivery_tag: The delivery tag associated with the message + to be rejected. 
+ :type delivery_tag: uuid.UUID + :keyword requeue: If False, the rejected message will be dropped by + the broker and not delivered to any other consumers. If True, + then the rejected message will be requeued for delivery to + another consumer, potentially to the same consumer who rejected + the message previously. + :type requeue: bool + + """ + self.qos.reject(delivery_tag, requeue=requeue) + + def basic_consume(self, queue, no_ack, callback, consumer_tag, **kwargs): + """Start an asynchronous consumer that reads from a queue. + + This method starts a consumer of type + :class:`~qpid.messaging.endpoints.Receiver` using the + :class:`~qpid.messaging.endpoints.Session` created and referenced by + the :class:`Transport` that reads messages from a queue + specified by name until stopped by a call to :meth:`basic_cancel`. + + + Messages are available later through a synchronous call to + :meth:`Transport.drain_events`, which will drain from the consumer + started by this method. :meth:`Transport.drain_events` is + synchronous, but the receiving of messages over the network occurs + asynchronously, so it should still perform well. + :meth:`Transport.drain_events` calls the callback provided here with + the Message of type self.Message. + + Each consumer is referenced by a consumer_tag, which is provided by + the caller of this method. + + This method sets up the callback onto the self.connection object in a + dict keyed by queue name. :meth:`~Transport.drain_events` is + responsible for calling that callback upon message receipt. + + All messages that are received are added to the QoS object to be + saved for asynchronous ACKing later after the message has been + handled by the caller of :meth:`~Transport.drain_events`. Messages + can be ACKed after being received through a call to :meth:`basic_ack`. + + If no_ack is True, The no_ack flag indicates that the receiver of + the message will not call :meth:`basic_ack` later. Since the + message will not be ACKed later, it is ACKed immediately. + + :meth:`basic_consume` transforms the message object type prior to + calling the callback. Initially the message comes in as a + :class:`qpid.messaging.Message`. This method unpacks the payload + of the :class:`qpid.messaging.Message` and creates a new object of + type self.Message. + + This method wraps the user delivered callback in a runtime-built + function which provides the type transformation from + :class:`qpid.messaging.Message` to + :class:`~kombu.transport.virtual.Message`, and adds the message to + the associated :class:`QoS` object for asynchronous ACKing + if necessary. + + :param queue: The name of the queue to consume messages from + :type queue: str + :param no_ack: If True, then messages will not be saved for ACKing + later, but will be ACKed immediately. If False, then messages + will be saved for ACKing later with a call to :meth:`basic_ack`. + :type no_ack: bool + :param callback: a callable that will be called when messages + arrive on the queue. + :type callback: a callable object + :param consumer_tag: a tag to reference the created consumer by. + This consumer_tag is needed to cancel the consumer. 
+ :type consumer_tag: an immutable object + + """ + self._tag_to_queue[consumer_tag] = queue + + def _callback(qpid_message): + raw_message = qpid_message.content + message = self.Message(self, raw_message) + delivery_tag = message.delivery_tag + self.qos.append(qpid_message, delivery_tag) + if no_ack: + # Celery will not ack this message later, so we should ack now + self.basic_ack(delivery_tag) + return callback(message) + + self.connection._callbacks[queue] = _callback + new_receiver = self.transport.session.receiver(queue) + new_receiver.capacity = self.qos.prefetch_count + self._receivers[consumer_tag] = new_receiver + + def basic_cancel(self, consumer_tag): + """Cancel consumer by consumer tag. + + Request the consumer stops reading messages from its queue. The + consumer is a :class:`~qpid.messaging.endpoints.Receiver`, and it is + closed using :meth:`~qpid.messaging.endpoints.Receiver.close`. + + This method also cleans up all lingering references of the consumer. + + :param consumer_tag: The tag which refers to the consumer to be + cancelled. Originally specified when the consumer was created + as a parameter to :meth:`basic_consume`. + :type consumer_tag: an immutable object + + """ + if consumer_tag in self._receivers: + receiver = self._receivers.pop(consumer_tag) + receiver.close() + queue = self._tag_to_queue.pop(consumer_tag, None) + self.connection._callbacks.pop(queue, None) + + def close(self): + """Cancel all associated messages and close the Channel. + + This cancels all consumers by calling :meth:`basic_cancel` for each + known consumer_tag. It also closes the self._broker sessions. Closing + the sessions implicitly causes all outstanding, un-ACKed messages to + be considered undelivered by the broker. + + """ + if not self.closed: + self.closed = True + for consumer_tag in self._receivers.keys(): + self.basic_cancel(consumer_tag) + if self.connection is not None: + self.connection.close_channel(self) + self._broker.close() + + @property + def qos(self): + """:class:`QoS` manager for this channel. + + Lazily instantiates an object of type :class:`QoS` upon access to + the self.qos attribute. + + :return: An already existing, or newly created QoS object + :rtype: :class:`QoS` + + """ + if self._qos is None: + self._qos = self.QoS(self.transport.session) + return self._qos + + def basic_qos(self, prefetch_count, *args): + """Change :class:`QoS` settings for this Channel. + + Set the number of un-acknowledged messages this Channel can fetch and + hold. The prefetch_value is also used as the capacity for any new + :class:`~qpid.messaging.endpoints.Receiver` objects. + + Currently, this value is hard coded to 1. + + :param prefetch_count: Not used. This method is hard-coded to 1. + :type prefetch_count: int + + """ + self.qos.prefetch_count = 1 + + def prepare_message(self, body, priority=None, content_type=None, + content_encoding=None, headers=None, properties=None): + """Prepare message data for sending. + + This message is typically called by + :meth:`kombu.messaging.Producer._publish` as a preparation step in + message publication. + + :param body: The body of the message + :type body: str + :keyword priority: A number between 0 and 9 that sets the priority of + the message. + :type priority: int + :keyword content_type: The content_type the message body should be + treated as. If this is unset, the + :class:`qpid.messaging.endpoints.Sender` object tries to + autodetect the content_type from the body. 
+ :type content_type: str + :keyword content_encoding: The content_encoding the message body is + encoded as. + :type content_encoding: str + :keyword headers: Additional Message headers that should be set. + Passed in as a key-value pair. + :type headers: dict + :keyword properties: Message properties to be set on the message. + :type properties: dict + + :return: Returns a dict object that encapsulates message + attributes. See parameters for more details on attributes that + can be set. + :rtype: dict + + """ + properties = properties or {} + info = properties.setdefault('delivery_info', {}) + info['priority'] = priority or 0 + + return {'body': body, + 'content-encoding': content_encoding, + 'content-type': content_type, + 'headers': headers or {}, + 'properties': properties or {}} + + def basic_publish(self, message, exchange, routing_key, **kwargs): + """Publish message onto an exchange using a routing key. + + Publish a message onto an exchange specified by name using a + routing key specified by routing_key. Prepares the message in the + following ways before sending: + + - encodes the body using :meth:`encode_body` + - wraps the body as a buffer object, so that + :class:`qpid.messaging.endpoints.Sender` uses a content type + that can support arbitrarily large messages. + - sets delivery_tag to a random uuid.UUID + - sets the exchange and routing_key info as delivery_info + + Internally uses :meth:`_put` to send the message synchronously. This + message is typically called by + :class:`kombu.messaging.Producer._publish` as the final step in + message publication. + + :param message: A dict containing key value pairs with the message + data. A valid message dict can be generated using the + :meth:`prepare_message` method. + :type message: dict + :param exchange: The name of the exchange to submit this message + onto. + :type exchange: str + :param routing_key: The routing key to be used as the message is + submitted onto the exchange. + :type routing_key: str + + """ + message['body'], body_encoding = self.encode_body( + message['body'], self.body_encoding, + ) + message['body'] = buffer(message['body']) + props = message['properties'] + props.update( + body_encoding=body_encoding, + delivery_tag=uuid.uuid4(), + ) + props['delivery_info'].update( + exchange=exchange, + routing_key=routing_key, + ) + self._put(routing_key, message, exchange, **kwargs) + + def encode_body(self, body, encoding=None): + """Encode a body using an optionally specified encoding. + + The encoding can be specified by name, and is looked up in + self.codecs. self.codecs uses strings as its keys which specify + the name of the encoding, and then the value is an instantiated + object that can provide encoding/decoding of that type through + encode and decode methods. + + :param body: The body to be encoded. + :type body: str + :keyword encoding: The encoding type to be used. Must be a supported + codec listed in self.codecs. + :type encoding: str + + :return: If encoding is specified, return a tuple with the first + position being the encoded body, and the second position the + encoding used. If encoding is not specified, the body is passed + through unchanged. + :rtype: tuple + + """ + if encoding: + return self.codecs.get(encoding).encode(body), encoding + return body, encoding + + def decode_body(self, body, encoding=None): + """Decode a body using an optionally specified encoding. + + The encoding can be specified by name, and is looked up in + self.codecs. 
self.codecs uses strings as its keys which specify + the name of the encoding, and then the value is an instantiated + object that can provide encoding/decoding of that type through + encode and decode methods. + + :param body: The body to be encoded. + :type body: str + :keyword encoding: The encoding type to be used. Must be a supported + codec listed in self.codecs. + :type encoding: str + + :return: If encoding is specified, the decoded body is returned. + If encoding is not specified, the body is returned unchanged. + :rtype: str + + """ + if encoding: + return self.codecs.get(encoding).decode(body) + return body + + def typeof(self, exchange, default='direct'): + """Get the exchange type. + + Lookup and return the exchange type for an exchange specified by + name. Exchange types are expected to be 'direct', 'topic', + and 'fanout', which correspond with exchange functionality as + specified in AMQP 0-10 and earlier. If the exchange cannot be + found, the default exchange type is returned. + + :param exchange: The exchange to have its type lookup up. + :type exchange: str + :keyword default: The type of exchange to assume if the exchange does + not exist. + :type default: str + + :return: The exchange type either 'direct', 'topic', or 'fanout'. + :rtype: str + + """ + qpid_exchange = self._broker.getExchange(exchange) + if qpid_exchange: + qpid_exchange_attributes = qpid_exchange.getAttributes() + return qpid_exchange_attributes['type'] + else: + return default + + +class Connection(object): + """Encapsulate a connection object for the + :class:`~kombu.transport.qpid.Transport`. + + :param host: The host that connections should connect to. + :param port: The port that connection should connect to. + :param username: The username that connections should connect with. + Optional. + :param password: The password that connections should connect with. + Optional but requires a username. + :param transport: The transport type that connections should use. + Either 'tcp', or 'ssl' are expected as values. + :param timeout: the timeout used when a Connection connects + to the broker. + :param sasl_mechanisms: The sasl authentication mechanism type to use. + refer to SASL documentation for an explanation of valid + values. + + .. note:: + + qpid.messaging has an AuthenticationFailure exception type, but + instead raises a ConnectionError with a message that indicates an + authentication failure occurred in those situations. + ConnectionError is listed as a recoverable error type, so kombu + will attempt to retry if a ConnectionError is raised. Retrying + the operation without adjusting the credentials is not correct, + so this method specifically checks for a ConnectionError that + indicates an Authentication Failure occurred. In those + situations, the error type is mutated while preserving the + original message and raised so kombu will allow the exception to + not be considered recoverable. + + + A connection object is created by a + :class:`~kombu.transport.qpid.Transport` during a call to + :meth:`~kombu.transport.qpid.Transport.establish_connection`. The + :class:`~kombu.transport.qpid.Transport` passes in + connection options as keywords that should be used for any connections + created. Each :class:`~kombu.transport.qpid.Transport` creates exactly + one Connection. + + A Connection object maintains a reference to a + :class:`~qpid.messaging.endpoints.Connection` which can be accessed + through a bound getter method named :meth:`get_qpid_connection` method. 
+ Each Channel uses a the Connection for each + :class:`~qpidtoollibs.BrokerAgent`, and the Transport maintains a session + for all senders and receivers. + + The Connection object is also responsible for maintaining the + dictionary of references to callbacks that should be called when + messages are received. These callbacks are saved in _callbacks, + and keyed on the queue name associated with the received message. The + _callbacks are setup in :meth:`Channel.basic_consume`, removed in + :meth:`Channel.basic_cancel`, and called in + :meth:`Transport.drain_events`. + + The following keys are expected to be passed in as keyword arguments + at a minimum: + + All keyword arguments are collected into the connection_options dict + and passed directly through to + :meth:`qpid.messaging.endpoints.Connection.establish`. + + """ + + # A class reference to the :class:`Channel` object + Channel = Channel + + def __init__(self, **connection_options): + self.connection_options = connection_options + self.channels = [] + self._callbacks = {} + self._qpid_conn = None + establish = qpid.messaging.Connection.establish + + # There are several inconsistent behaviors in the sasl libraries + # used on different systems. Although qpid.messaging allows + # multiple space separated sasl mechanisms, this implementation + # only advertises one type to the server. These are either + # ANONYMOUS, PLAIN, or an overridden value specified by the user. + + sasl_mech = connection_options['sasl_mechanisms'] + + try: + msg = _('Attempting to connect to qpid with ' + 'SASL mechanism %s') % sasl_mech + logger.debug(msg) + self._qpid_conn = establish(**self.connection_options) + # connection was successful if we got this far + msg = _('Connected to qpid with SASL ' + 'mechanism %s') % sasl_mech + logger.info(msg) + except ConnectionError as conn_exc: + # if we get one of these errors, do not raise an exception. + # Raising will cause the connection to be retried. Instead, + # just continue on to the next mech. + coded_as_auth_failure = getattr(conn_exc, 'code', None) == 320 + contains_auth_fail_text = \ + 'Authentication failed' in conn_exc.text + contains_mech_fail_text = \ + 'sasl negotiation failed: no mechanism agreed' \ + in conn_exc.text + contains_mech_unavail_text = 'no mechanism available' \ + in conn_exc.text + if coded_as_auth_failure or \ + contains_auth_fail_text or contains_mech_fail_text or \ + contains_mech_unavail_text: + msg = _('Unable to connect to qpid with SASL ' + 'mechanism %s') % sasl_mech + logger.error(msg) + raise AuthenticationFailure(sys.exc_info()[1]) + raise + + def get_qpid_connection(self): + """Return the existing connection (singleton). + + :return: The existing qpid.messaging.Connection + :rtype: :class:`qpid.messaging.endpoints.Connection` + + """ + return self._qpid_conn + + def close(self): + """Close the connection + + Closing the connection will close all associated session, senders, or + receivers used by the Connection. + + """ + self._qpid_conn.close() + + def close_channel(self, channel): + """Close a Channel. + + Close a channel specified by a reference to the + :class:`~kombu.transport.qpid.Channel` object. + + :param channel: Channel that should be closed. + :type channel: :class:`~kombu.transport.qpid.Channel`. + + """ + try: + self.channels.remove(channel) + except ValueError: + pass + finally: + channel.connection = None + + +class Transport(base.Transport): + """Kombu native transport for a Qpid broker. 
+ + Provide a native transport for Kombu that allows consumers and + producers to read and write messages to/from a broker. This Transport + is capable of supporting both synchronous and asynchronous reading. + All writes are synchronous through the :class:`Channel` objects that + support this Transport. + + Asynchronous reads are done using a call to :meth:`drain_events`, + which synchronously reads messages that were fetched asynchronously, and + then handles them through calls to the callback handlers maintained on + the :class:`Connection` object. + + The Transport also provides methods to establish and close a connection + to the broker. This Transport establishes a factory-like pattern that + allows for singleton pattern to consolidate all Connections into a single + one. + + The Transport can create :class:`Channel` objects to communicate with the + broker with using the :meth:`create_channel` method. + + The Transport identifies recoverable connection errors and recoverable + channel errors according to the Kombu 3.0 interface. These exception are + listed as tuples and store in the Transport class attribute + `recoverable_connection_errors` and `recoverable_channel_errors` + respectively. Any exception raised that is not a member of one of these + tuples is considered non-recoverable. This allows Kombu support for + automatic retry of certain operations to function correctly. + + For backwards compatibility to the pre Kombu 3.0 exception interface, the + recoverable errors are also listed as `connection_errors` and + `channel_errors`. + + """ + + # Reference to the class that should be used as the Connection object + Connection = Connection + + # This Transport does not specify a polling interval. + polling_interval = None + + # This Transport does support the Celery asynchronous event model. + supports_ev = True + + # The driver type and name for identification purposes. + driver_type = 'qpid' + driver_name = 'qpid' + + # Exceptions that can be recovered from, but where the connection must be + # closed and re-established first. + recoverable_connection_errors = ( + ConnectionError, + select.error, + ) + + # Exceptions that can be automatically recovered from without + # re-establishing the connection. + recoverable_channel_errors = ( + NotFound, + ) + + # Support the pre 3.0 Kombu exception labeling interface which treats + # connection_errors and channel_errors both as recoverable via a + # reconnect. + connection_errors = recoverable_connection_errors + channel_errors = recoverable_channel_errors + + def __init__(self, *args, **kwargs): + self.verify_runtime_environment() + super(Transport, self).__init__(*args, **kwargs) + self.use_async_interface = False + + def verify_runtime_environment(self): + """Verify that the runtime environment is acceptable. + + This method is called as part of __init__ and raises a RuntimeError + in Python3 or PyPi environments. This module is not compatible with + Python3 or PyPi. The RuntimeError identifies this to the user up + front along with suggesting Python 2.6+ be used instead. + + This method also checks that the dependencies qpidtoollibs and + qpid.messaging are installed. If either one is not installed a + RuntimeError is raised. + + :raises: RuntimeError if the runtime environment is not acceptable. + + """ + if getattr(sys, 'pypy_version_info', None): + raise RuntimeError( + 'The Qpid transport for Kombu does not ' + 'support PyPy. 
Try using Python 2.6+', + ) + if PY3: + raise RuntimeError( + 'The Qpid transport for Kombu does not ' + 'support Python 3. Try using Python 2.6+', + ) + + if dependency_is_none(qpidtoollibs): + raise RuntimeError( + 'The Python package "qpidtoollibs" is missing. Install it ' + 'with your package manager. You can also try `pip install ' + 'qpid-tools`.') + + if dependency_is_none(qpid): + raise RuntimeError( + 'The Python package "qpid.messaging" is missing. Install it ' + 'with your package manager. You can also try `pip install ' + 'qpid-python`.') + + def _qpid_message_ready_handler(self, session): + if self.use_async_interface: + os.write(self._w, '0') + + def _qpid_async_exception_notify_handler(self, obj_with_exception, exc): + if self.use_async_interface: + os.write(self._w, 'e') + + def on_readable(self, connection, loop): + """Handle any messages associated with this Transport. + + This method clears a single message from the externally monitored + file descriptor by issuing a read call to the self.r file descriptor + which removes a single '0' character that was placed into the pipe + by the Qpid session message callback handler. Once a '0' is read, + all available events are drained through a call to + :meth:`drain_events`. + + The file descriptor self.r is modified to be non-blocking, ensuring + that an accidental call to this method when no more messages will + not cause indefinite blocking. + + Nothing is expected to be returned from :meth:`drain_events` because + :meth:`drain_events` handles messages by calling callbacks that are + maintained on the :class:`~kombu.transport.qpid.Connection` object. + When :meth:`drain_events` returns, all associated messages have been + handled. + + This method calls drain_events() which reads as many messages as are + available for this Transport, and then returns. It blocks in the + sense that reading and handling a large number of messages may take + time, but it does not block waiting for a new message to arrive. When + :meth:`drain_events` is called a timeout is not specified, which + causes this behavior. + + One interesting behavior of note is where multiple messages are + ready, and this method removes a single '0' character from + self.r, but :meth:`drain_events` may handle an arbitrary amount of + messages. In that case, extra '0' characters may be left on self.r + to be read, where messages corresponding with those '0' characters + have already been handled. The external epoll loop will incorrectly + think additional data is ready for reading, and will call + on_readable unnecessarily, once for each '0' to be read. Additional + calls to :meth:`on_readable` produce no negative side effects, + and will eventually clear out the symbols from the self.r file + descriptor. If new messages show up during this draining period, + they will also be properly handled. + + :param connection: The connection associated with the readable + events, which contains the callbacks that need to be called for + the readable objects. + :type connection: kombu.transport.qpid.Connection + :param loop: The asynchronous loop object that contains epoll like + functionality. + :type loop: kombu.async.Hub + + """ + os.read(self.r, 1) + try: + self.drain_events(connection) + except socket.timeout: + pass + + def register_with_event_loop(self, connection, loop): + """Register a file descriptor and callback with the loop. 
+ + Register the callback self.on_readable to be called when an + external epoll loop sees that the file descriptor registered is + ready for reading. The file descriptor is created by this Transport, + and is written to when a message is available. + + Because supports_ev == True, Celery expects to call this method to + give the Transport an opportunity to register a read file descriptor + for external monitoring by celery using an Event I/O notification + mechanism such as epoll. A callback is also registered that is to + be called once the external epoll loop is ready to handle the epoll + event associated with messages that are ready to be handled for + this Transport. + + The registration call is made exactly once per Transport after the + Transport is instantiated. + + :param connection: A reference to the connection associated with + this Transport. + :type connection: kombu.transport.qpid.Connection + :param loop: A reference to the external loop. + :type loop: kombu.async.hub.Hub + + """ + self.r, self._w = os.pipe() + if fcntl is not None: + fcntl.fcntl(self.r, fcntl.F_SETFL, os.O_NONBLOCK) + self.use_async_interface = True + loop.add_reader(self.r, self.on_readable, connection, loop) + + def establish_connection(self): + """Establish a Connection object. + + Determines the correct options to use when creating any + connections needed by this Transport, and create a + :class:`Connection` object which saves those values for + connections generated as they are needed. The options are a + mixture of what is passed in through the creator of the + Transport, and the defaults provided by + :meth:`default_connection_params`. Options cover broker network + settings, timeout behaviors, authentication, and identity + verification settings. + + This method also creates and stores a + :class:`~qpid.messaging.endpoints.Session` using the + :class:`~qpid.messaging.endpoints.Connection` created by this + method. The Session is stored on self. + + :return: The created :class:`Connection` object is returned. + :rtype: :class:`Connection` + + """ + conninfo = self.client + for name, default_value in items(self.default_connection_params): + if not getattr(conninfo, name, None): + setattr(conninfo, name, default_value) + if conninfo.ssl: + conninfo.qpid_transport = 'ssl' + conninfo.transport_options['ssl_keyfile'] = conninfo.ssl[ + 'keyfile'] + conninfo.transport_options['ssl_certfile'] = conninfo.ssl[ + 'certfile'] + conninfo.transport_options['ssl_trustfile'] = conninfo.ssl[ + 'ca_certs'] + if conninfo.ssl['cert_reqs'] == ssl.CERT_REQUIRED: + conninfo.transport_options['ssl_skip_hostname_check'] = False + else: + conninfo.transport_options['ssl_skip_hostname_check'] = True + else: + conninfo.qpid_transport = 'tcp' + + credentials = {} + if conninfo.login_method is None: + if conninfo.userid is not None and conninfo.password is not None: + sasl_mech = 'PLAIN' + credentials['username'] = conninfo.userid + credentials['password'] = conninfo.password + elif conninfo.userid is None and conninfo.password is not None: + raise Exception( + 'Password configured but no username. SASL PLAIN ' + 'requires a username when using a password.') + elif conninfo.userid is not None and conninfo.password is None: + raise Exception( + 'Username configured but no password. 
SASL PLAIN ' + 'requires a password when using a username.') + else: + sasl_mech = 'ANONYMOUS' + else: + sasl_mech = conninfo.login_method + if conninfo.userid is not None: + credentials['username'] = conninfo.userid + + opts = { + 'host': conninfo.hostname, + 'port': conninfo.port, + 'sasl_mechanisms': sasl_mech, + 'timeout': conninfo.connect_timeout, + 'transport': conninfo.qpid_transport + } + + opts.update(credentials) + opts.update(conninfo.transport_options) + + conn = self.Connection(**opts) + conn.client = self.client + self.session = conn.get_qpid_connection().session() + self.session.set_message_received_notify_handler( + self._qpid_message_ready_handler + ) + conn.get_qpid_connection().set_async_exception_notify_handler( + self._qpid_async_exception_notify_handler + ) + self.session.set_async_exception_notify_handler( + self._qpid_async_exception_notify_handler + ) + return conn + + def close_connection(self, connection): + """Close the :class:`Connection` object. + + :param connection: The Connection that should be closed. + :type connection: :class:`kombu.transport.qpid.Connection` + + """ + connection.close() + + def drain_events(self, connection, timeout=0, **kwargs): + """Handle and call callbacks for all ready Transport messages. + + Drains all events that are ready from all + :class:`~qpid.messaging.endpoints.Receiver` that are asynchronously + fetching messages. + + For each drained message, the message is called to the appropriate + callback. Callbacks are organized by queue name. + + :param connection: The :class:`~kombu.transport.qpid.Connection` that + contains the callbacks, indexed by queue name, which will be called + by this method. + :type connection: kombu.transport.qpid.Connection + :keyword timeout: The timeout that limits how long this method will + run for. The timeout could interrupt a blocking read that is + waiting for a new message, or cause this method to return before + all messages are drained. Defaults to 0. + :type timeout: int + + """ + start_time = time.time() + elapsed_time = -1 + while elapsed_time < timeout: + try: + receiver = self.session.next_receiver(timeout=timeout) + message = receiver.fetch() + queue = receiver.source + except QpidEmpty: + raise socket.timeout() + else: + connection._callbacks[queue](message) + elapsed_time = time.time() - start_time + raise socket.timeout() + + def create_channel(self, connection): + """Create and return a :class:`~kombu.transport.qpid.Channel`. + + Creates a new channel, and appends the channel to the + list of channels known by the Connection. Once the new + channel is created, it is returned. + + :param connection: The connection that should support the new + :class:`~kombu.transport.qpid.Channel`. + :type connection: kombu.transport.qpid.Connection + + :return: The new Channel that is made. + :rtype: :class:`kombu.transport.qpid.Channel`. + + """ + channel = connection.Channel(connection, self) + connection.channels.append(channel) + return channel + + @property + def default_connection_params(self): + """Return a dict with default connection parameters. + + These connection parameters will be used whenever the creator of + Transport does not specify a required parameter. + + :return: A dict containing the default parameters. 
+ :rtype: dict + + """ + return { + 'hostname': 'localhost', + 'port': 5672, + } + + def __del__(self): + """Ensure file descriptors opened in __init__() are closed.""" + if self.use_async_interface: + for fd in (self.r, self._w): + try: + os.close(fd) + except OSError: + # ignored + pass diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/redis.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/redis.py new file mode 100644 index 0000000..190709e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/redis.py @@ -0,0 +1,1023 @@ +""" +kombu.transport.redis +===================== + +Redis transport. + +""" +from __future__ import absolute_import + +import numbers +import socket + +from bisect import bisect +from collections import namedtuple +from contextlib import contextmanager +from time import time + +from amqp import promise +from anyjson import loads, dumps + +from kombu.exceptions import InconsistencyError, VersionMismatch +from kombu.five import Empty, values, string_t +from kombu.log import get_logger +from kombu.utils import cached_property, uuid +from kombu.utils.eventio import poll, READ, ERR +from kombu.utils.encoding import bytes_to_str +from kombu.utils.url import _parse_url + +from . import virtual + +try: + from billiard.util import register_after_fork +except ImportError: # pragma: no cover + try: + from multiprocessing.util import register_after_fork # noqa + except ImportError: + def register_after_fork(*args, **kwargs): # noqa + pass + +try: + import redis +except ImportError: # pragma: no cover + redis = None # noqa + +logger = get_logger('kombu.transport.redis') +crit, warn = logger.critical, logger.warn + +DEFAULT_PORT = 6379 +DEFAULT_DB = 0 + +PRIORITY_STEPS = [0, 3, 6, 9] + +error_classes_t = namedtuple('error_classes_t', ( + 'connection_errors', 'channel_errors', +)) + +NO_ROUTE_ERROR = """ +Cannot route message for exchange {0!r}: Table empty or key no longer exists. +Probably the key ({1!r}) has been removed from the Redis database. +""" + +# This implementation may seem overly complex, but I assure you there is +# a good reason for doing it this way. +# +# Consuming from several connections enables us to emulate channels, +# which means we can have different service guarantees for individual +# channels. +# +# So we need to consume messages from multiple connections simultaneously, +# and using epoll means we don't have to do so using multiple threads. +# +# Also it means we can easily use PUBLISH/SUBSCRIBE to do fanout +# exchanges (broadcast), as an alternative to pushing messages to fanout-bound +# queues manually. 
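+# A minimal usage sketch (editorial illustration, not part of the upstream
+# module; the broker URL and the exchange/queue names below are assumptions):
+# because fanout is emulated via PUBLISH/SUBSCRIBE, the ordinary Kombu API
+# can be used unchanged on top of this transport, e.g.:
+#
+#     from kombu import Connection, Exchange, Queue
+#
+#     events = Exchange('events', type='fanout')
+#     queue = Queue('worker-1-events', exchange=events)
+#     with Connection('redis://localhost:6379/0') as conn:
+#         producer = conn.Producer()
+#         producer.publish({'hello': 'world'}, exchange=events,
+#                          declare=[queue])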
+ + +def get_redis_error_classes(): + from redis import exceptions + # This exception suddenly changed name between redis-py versions + if hasattr(exceptions, 'InvalidData'): + DataError = exceptions.InvalidData + else: + DataError = exceptions.DataError + return error_classes_t( + (virtual.Transport.connection_errors + tuple(filter(None, ( + InconsistencyError, + socket.error, + IOError, + OSError, + exceptions.ConnectionError, + exceptions.AuthenticationError, + getattr(exceptions, 'TimeoutError', None))))), + (virtual.Transport.channel_errors + ( + DataError, + exceptions.InvalidResponse, + exceptions.ResponseError)), + ) + + +def get_redis_ConnectionError(): + from redis import exceptions + return exceptions.ConnectionError + + +class MutexHeld(Exception): + pass + + +@contextmanager +def Mutex(client, name, expire): + lock_id = uuid() + i_won = client.setnx(name, lock_id) + try: + if i_won: + client.expire(name, expire) + yield + else: + if not client.ttl(name): + client.expire(name, expire) + raise MutexHeld() + finally: + if i_won: + try: + with client.pipeline(True) as pipe: + pipe.watch(name) + if pipe.get(name) == lock_id: + pipe.multi() + pipe.delete(name) + pipe.execute() + pipe.unwatch() + except redis.WatchError: + pass + + +class QoS(virtual.QoS): + restore_at_shutdown = True + + def __init__(self, *args, **kwargs): + super(QoS, self).__init__(*args, **kwargs) + self._vrestore_count = 0 + + def append(self, message, delivery_tag): + delivery = message.delivery_info + EX, RK = delivery['exchange'], delivery['routing_key'] + with self.pipe_or_acquire() as pipe: + pipe.zadd(self.unacked_index_key, delivery_tag, time()) \ + .hset(self.unacked_key, delivery_tag, + dumps([message._raw, EX, RK])) \ + .execute() + super(QoS, self).append(message, delivery_tag) + + def restore_unacked(self, client=None): + with self.channel.conn_or_acquire(client) as client: + for tag in self._delivered: + self.restore_by_tag(tag, client=client) + self._delivered.clear() + + def ack(self, delivery_tag): + self._remove_from_indices(delivery_tag).execute() + super(QoS, self).ack(delivery_tag) + + def reject(self, delivery_tag, requeue=False): + if requeue: + self.restore_by_tag(delivery_tag, leftmost=True) + self.ack(delivery_tag) + + @contextmanager + def pipe_or_acquire(self, pipe=None, client=None): + if pipe: + yield pipe + else: + with self.channel.conn_or_acquire(client) as client: + yield client.pipeline() + + def _remove_from_indices(self, delivery_tag, pipe=None): + with self.pipe_or_acquire(pipe) as pipe: + return pipe.zrem(self.unacked_index_key, delivery_tag) \ + .hdel(self.unacked_key, delivery_tag) + + def restore_visible(self, start=0, num=10, interval=10): + self._vrestore_count += 1 + if (self._vrestore_count - 1) % interval: + return + with self.channel.conn_or_acquire() as client: + ceil = time() - self.visibility_timeout + try: + with Mutex(client, self.unacked_mutex_key, + self.unacked_mutex_expire): + visible = client.zrevrangebyscore( + self.unacked_index_key, ceil, 0, + start=num and start, num=num, withscores=True) + for tag, score in visible or []: + self.restore_by_tag(tag, client) + except MutexHeld: + pass + + def restore_by_tag(self, tag, client=None, leftmost=False): + with self.channel.conn_or_acquire(client) as client: + with client.pipeline() as pipe: + p, _, _ = self._remove_from_indices( + tag, pipe.hget(self.unacked_key, tag)).execute() + if p: + M, EX, RK = loads(bytes_to_str(p)) # json is unicode + self.channel._do_restore_message(M, EX, RK, client, leftmost) + + 
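+    # Editorial note (a sketch of the layout used by the ack emulation
+    # above; key names follow the channel defaults, e.g. 'unacked'):
+    # each unacked delivery is tracked in two Redis structures, roughly
+    #
+    #     HSET unacked        <delivery_tag> [raw_message, exchange, routing_key]
+    #     ZADD unacked_index  <timestamp> <delivery_tag>
+    #
+    # so that restore_visible() can scan the sorted set for entries older
+    # than visibility_timeout and requeue them via restore_by_tag().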
@cached_property + def unacked_key(self): + return self.channel.unacked_key + + @cached_property + def unacked_index_key(self): + return self.channel.unacked_index_key + + @cached_property + def unacked_mutex_key(self): + return self.channel.unacked_mutex_key + + @cached_property + def unacked_mutex_expire(self): + return self.channel.unacked_mutex_expire + + @cached_property + def visibility_timeout(self): + return self.channel.visibility_timeout + + +class MultiChannelPoller(object): + eventflags = READ | ERR + + #: Set by :meth:`get` while reading from the socket. + _in_protected_read = False + + #: Set of one-shot callbacks to call after reading from socket. + after_read = None + + def __init__(self): + # active channels + self._channels = set() + # file descriptor -> channel map. + self._fd_to_chan = {} + # channel -> socket map + self._chan_to_sock = {} + # poll implementation (epoll/kqueue/select) + self.poller = poll() + # one-shot callbacks called after reading from socket. + self.after_read = set() + + def close(self): + for fd in values(self._chan_to_sock): + try: + self.poller.unregister(fd) + except (KeyError, ValueError): + pass + self._channels.clear() + self._fd_to_chan.clear() + self._chan_to_sock.clear() + + def add(self, channel): + self._channels.add(channel) + + def discard(self, channel): + self._channels.discard(channel) + + def _on_connection_disconnect(self, connection): + try: + self.poller.unregister(connection._sock) + except (AttributeError, TypeError): + pass + + def _register(self, channel, client, type): + if (channel, client, type) in self._chan_to_sock: + self._unregister(channel, client, type) + if client.connection._sock is None: # not connected yet. + client.connection.connect() + sock = client.connection._sock + self._fd_to_chan[sock.fileno()] = (channel, type) + self._chan_to_sock[(channel, client, type)] = sock + self.poller.register(sock, self.eventflags) + + def _unregister(self, channel, client, type): + self.poller.unregister(self._chan_to_sock[(channel, client, type)]) + + def _register_BRPOP(self, channel): + """enable BRPOP mode for channel.""" + ident = channel, channel.client, 'BRPOP' + if channel.client.connection._sock is None or \ + ident not in self._chan_to_sock: + channel._in_poll = False + self._register(*ident) + + if not channel._in_poll: # send BRPOP + channel._brpop_start() + + def _register_LISTEN(self, channel): + """enable LISTEN mode for channel.""" + if channel.subclient.connection._sock is None: + channel._in_listen = False + self._register(channel, channel.subclient, 'LISTEN') + if not channel._in_listen: + channel._subscribe() # send SUBSCRIBE + + def on_poll_start(self): + for channel in self._channels: + if channel.active_queues: # BRPOP mode? + if channel.qos.can_consume(): + self._register_BRPOP(channel) + if channel.active_fanout_queues: # LISTEN mode? + self._register_LISTEN(channel) + + def on_poll_init(self, poller): + self.poller = poller + for channel in self._channels: + return channel.qos.restore_visible( + num=channel.unacked_restore_limit, + ) + + def maybe_restore_messages(self): + for channel in self._channels: + if channel.active_queues: + # only need to do this once, as they are not local to channel. 
+ return channel.qos.restore_visible( + num=channel.unacked_restore_limit, + ) + + def on_readable(self, fileno): + try: + chan, type = self._fd_to_chan[fileno] + except KeyError: + return + if chan.qos.can_consume(): + return chan.handlers[type]() + + def handle_event(self, fileno, event): + if event & READ: + return self.on_readable(fileno), self + elif event & ERR: + chan, type = self._fd_to_chan[fileno] + chan._poll_error(type) + + def get(self, timeout=None): + self._in_protected_read = True + try: + for channel in self._channels: + if channel.active_queues: # BRPOP mode? + if channel.qos.can_consume(): + self._register_BRPOP(channel) + if channel.active_fanout_queues: # LISTEN mode? + self._register_LISTEN(channel) + + events = self.poller.poll(timeout) + for fileno, event in events or []: + ret = self.handle_event(fileno, event) + if ret: + return ret + + # - no new data, so try to restore messages. + # - reset active redis commands. + self.maybe_restore_messages() + + raise Empty() + finally: + self._in_protected_read = False + while self.after_read: + try: + fun = self.after_read.pop() + except KeyError: + break + else: + fun() + + @property + def fds(self): + return self._fd_to_chan + + +class Channel(virtual.Channel): + QoS = QoS + + _client = None + _subclient = None + _closing = False + supports_fanout = True + keyprefix_queue = '_kombu.binding.%s' + keyprefix_fanout = '/{db}.' + sep = '\x06\x16' + _in_poll = False + _in_listen = False + _fanout_queues = {} + ack_emulation = True + unacked_key = 'unacked' + unacked_index_key = 'unacked_index' + unacked_mutex_key = 'unacked_mutex' + unacked_mutex_expire = 300 # 5 minutes + unacked_restore_limit = None + visibility_timeout = 3600 # 1 hour + priority_steps = PRIORITY_STEPS + socket_timeout = None + socket_connect_timeout = None + socket_keepalive = None + socket_keepalive_options = None + max_connections = 10 + #: Transport option to enable disable fanout keyprefix. + #: Should be enabled by default, but that is not + #: backwards compatible. Can also be string, in which + #: case it changes the default prefix ('/{db}.') into to something + #: else. The prefix must include a leading slash and a trailing dot. + fanout_prefix = False + + #: If enabled the fanout exchange will support patterns in routing + #: and binding keys (like a topic exchange but using PUB/SUB). + #: This will be enabled by default in a future version. + fanout_patterns = False + + _async_pool = None + _pool = None + _disconnecting_pools = False + + from_transport_options = ( + virtual.Channel.from_transport_options + + ('ack_emulation', + 'unacked_key', + 'unacked_index_key', + 'unacked_mutex_key', + 'unacked_mutex_expire', + 'visibility_timeout', + 'unacked_restore_limit', + 'fanout_prefix', + 'fanout_patterns', + 'socket_timeout', + 'socket_connect_timeout', + 'socket_keepalive', + 'socket_keepalive_options', + 'queue_order_strategy', + 'max_connections', + 'priority_steps') # <-- do not add comma here! 
+ ) + + def __init__(self, *args, **kwargs): + super_ = super(Channel, self) + super_.__init__(*args, **kwargs) + + if not self.ack_emulation: # disable visibility timeout + self.QoS = virtual.QoS + + self._queue_cycle = [] + self.AsyncClient = self._get_async_client() + self.Client = redis.Redis + self.ResponseError = self._get_response_error() + self.active_fanout_queues = set() + self.auto_delete_queues = set() + self._fanout_to_queue = {} + self.handlers = {'BRPOP': self._brpop_read, 'LISTEN': self._receive} + + if self.fanout_prefix: + if isinstance(self.fanout_prefix, string_t): + self.keyprefix_fanout = self.fanout_prefix + else: + # previous versions did not set a fanout, so cannot enable + # by default. + self.keyprefix_fanout = '' + + # Evaluate connection. + try: + self.client.info() + except Exception: + self._disconnect_pools() + raise + + self.connection.cycle.add(self) # add to channel poller. + # copy errors, in case channel closed but threads still + # are still waiting for data. + self.connection_errors = self.connection.connection_errors + + register_after_fork(self, self._after_fork) + + def _after_fork(self): + self._disconnect_pools() + + def _disconnect_pools(self): + if not self._disconnecting_pools: + self._disconnecting_pools = True + try: + if self._async_pool is not None: + self._async_pool.disconnect() + if self._pool is not None: + self._pool.disconnect() + self._async_pool = self._pool = None + finally: + self._disconnecting_pools = False + + def _on_connection_disconnect(self, connection): + self._in_poll = False + self._in_listen = False + if self.connection and self.connection.cycle: + self.connection.cycle._on_connection_disconnect(connection) + self._disconnect_pools() + if not self._closing: + raise get_redis_ConnectionError() + + def _do_restore_message(self, payload, exchange, routing_key, + client=None, leftmost=False): + with self.conn_or_acquire(client) as client: + try: + try: + payload['headers']['redelivered'] = True + except KeyError: + pass + for queue in self._lookup(exchange, routing_key): + (client.lpush if leftmost else client.rpush)( + queue, dumps(payload), + ) + except Exception: + crit('Could not restore message: %r', payload, exc_info=True) + + def _restore(self, message, leftmost=False): + if not self.ack_emulation: + return super(Channel, self)._restore(message) + tag = message.delivery_tag + with self.conn_or_acquire() as client: + with client.pipeline() as pipe: + P, _ = pipe.hget(self.unacked_key, tag) \ + .hdel(self.unacked_key, tag) \ + .execute() + if P: + M, EX, RK = loads(bytes_to_str(P)) # json is unicode + self._do_restore_message(M, EX, RK, client, leftmost) + + def _restore_at_beginning(self, message): + return self._restore(message, leftmost=True) + + def basic_consume(self, queue, *args, **kwargs): + if queue in self._fanout_queues: + exchange, _ = self._fanout_queues[queue] + self.active_fanout_queues.add(queue) + self._fanout_to_queue[exchange] = queue + ret = super(Channel, self).basic_consume(queue, *args, **kwargs) + self._update_cycle() + return ret + + def basic_cancel(self, consumer_tag): + # If we are busy reading messages we may experience + # a race condition where a message is consumed after + # cancelling, so we must delay this operation until reading + # is complete (Issue celery/celery#1773). 
+ connection = self.connection + if connection: + if connection.cycle._in_protected_read: + return connection.cycle.after_read.add( + promise(self._basic_cancel, (consumer_tag, )), + ) + return self._basic_cancel(consumer_tag) + + def _basic_cancel(self, consumer_tag): + try: + queue = self._tag_to_queue[consumer_tag] + except KeyError: + return + try: + self.active_fanout_queues.remove(queue) + except KeyError: + pass + else: + self._unsubscribe_from(queue) + try: + exchange, _ = self._fanout_queues[queue] + self._fanout_to_queue.pop(exchange) + except KeyError: + pass + ret = super(Channel, self).basic_cancel(consumer_tag) + self._update_cycle() + return ret + + def _get_publish_topic(self, exchange, routing_key): + if routing_key and self.fanout_patterns: + return ''.join([self.keyprefix_fanout, exchange, '/', routing_key]) + return ''.join([self.keyprefix_fanout, exchange]) + + def _get_subscribe_topic(self, queue): + exchange, routing_key = self._fanout_queues[queue] + return self._get_publish_topic(exchange, routing_key) + + def _subscribe(self): + keys = [self._get_subscribe_topic(queue) + for queue in self.active_fanout_queues] + if not keys: + return + c = self.subclient + if c.connection._sock is None: + c.connection.connect() + self._in_listen = True + c.psubscribe(keys) + + def _unsubscribe_from(self, queue): + topic = self._get_subscribe_topic(queue) + c = self.subclient + should_disconnect = False + if c.connection._sock is None: + c.connection.connect() + should_disconnect = True + try: + c.unsubscribe([topic]) + finally: + if should_disconnect and c.connection: + c.connection.disconnect() + + def _handle_message(self, client, r): + if bytes_to_str(r[0]) == 'unsubscribe' and r[2] == 0: + client.subscribed = False + elif bytes_to_str(r[0]) == 'pmessage': + return {'type': r[0], 'pattern': r[1], + 'channel': r[2], 'data': r[3]} + else: + return {'type': r[0], 'pattern': None, + 'channel': r[1], 'data': r[2]} + + def _receive(self): + c = self.subclient + response = None + try: + response = c.parse_response() + except self.connection_errors: + self._in_listen = False + raise Empty() + if response is not None: + payload = self._handle_message(c, response) + if bytes_to_str(payload['type']).endswith('message'): + channel = bytes_to_str(payload['channel']) + if payload['data']: + if channel[0] == '/': + _, _, channel = channel.partition('.') + try: + message = loads(bytes_to_str(payload['data'])) + except (TypeError, ValueError): + warn('Cannot process event on channel %r: %s', + channel, repr(payload)[:4096], exc_info=1) + raise Empty() + exchange = channel.split('/', 1)[0] + return message, self._fanout_to_queue[exchange] + raise Empty() + + def _brpop_start(self, timeout=1): + queues = self._consume_cycle() + if not queues: + return + keys = [self._q_for_pri(queue, pri) for pri in PRIORITY_STEPS + for queue in queues] + [timeout or 0] + self._in_poll = True + self.client.connection.send_command('BRPOP', *keys) + + def _brpop_read(self, **options): + try: + try: + dest__item = self.client.parse_response(self.client.connection, + 'BRPOP', + **options) + except self.connection_errors: + # if there's a ConnectionError, disconnect so the next + # iteration will reconnect automatically. 
+ self.client.connection.disconnect() + raise Empty() + if dest__item: + dest, item = dest__item + dest = bytes_to_str(dest).rsplit(self.sep, 1)[0] + self._rotate_cycle(dest) + return loads(bytes_to_str(item)), dest + else: + raise Empty() + finally: + self._in_poll = False + + def _poll_error(self, type, **options): + if type == 'LISTEN': + self.subclient.parse_response() + else: + self.client.parse_response(self.client.connection, type) + + def _get(self, queue): + with self.conn_or_acquire() as client: + for pri in PRIORITY_STEPS: + item = client.rpop(self._q_for_pri(queue, pri)) + if item: + return loads(bytes_to_str(item)) + raise Empty() + + def _size(self, queue): + with self.conn_or_acquire() as client: + with client.pipeline() as pipe: + for pri in PRIORITY_STEPS: + pipe = pipe.llen(self._q_for_pri(queue, pri)) + sizes = pipe.execute() + return sum(size for size in sizes + if isinstance(size, numbers.Integral)) + + def _q_for_pri(self, queue, pri): + pri = self.priority(pri) + return '%s%s%s' % ((queue, self.sep, pri) if pri else (queue, '', '')) + + def priority(self, n): + steps = self.priority_steps + return steps[bisect(steps, n) - 1] + + def _put(self, queue, message, **kwargs): + """Deliver message.""" + try: + pri = max(min(int( + message['properties']['delivery_info']['priority']), 9), 0) + except (TypeError, ValueError, KeyError): + pri = 0 + with self.conn_or_acquire() as client: + client.lpush(self._q_for_pri(queue, pri), dumps(message)) + + def _put_fanout(self, exchange, message, routing_key, **kwargs): + """Deliver fanout message.""" + with self.conn_or_acquire() as client: + client.publish( + self._get_publish_topic(exchange, routing_key), + dumps(message), + ) + + def _new_queue(self, queue, auto_delete=False, **kwargs): + if auto_delete: + self.auto_delete_queues.add(queue) + + def _queue_bind(self, exchange, routing_key, pattern, queue): + if self.typeof(exchange).type == 'fanout': + # Mark exchange as fanout. + self._fanout_queues[queue] = ( + exchange, routing_key.replace('#', '*'), + ) + with self.conn_or_acquire() as client: + client.sadd(self.keyprefix_queue % (exchange, ), + self.sep.join([routing_key or '', + pattern or '', + queue or ''])) + + def _delete(self, queue, exchange, routing_key, pattern, *args): + self.auto_delete_queues.discard(queue) + with self.conn_or_acquire() as client: + client.srem(self.keyprefix_queue % (exchange, ), + self.sep.join([routing_key or '', + pattern or '', + queue or ''])) + with client.pipeline() as pipe: + for pri in PRIORITY_STEPS: + pipe = pipe.delete(self._q_for_pri(queue, pri)) + pipe.execute() + + def _has_queue(self, queue, **kwargs): + with self.conn_or_acquire() as client: + with client.pipeline() as pipe: + for pri in PRIORITY_STEPS: + pipe = pipe.exists(self._q_for_pri(queue, pri)) + return any(pipe.execute()) + + def get_table(self, exchange): + key = self.keyprefix_queue % exchange + with self.conn_or_acquire() as client: + values = client.smembers(key) + if not values: + raise InconsistencyError(NO_ROUTE_ERROR.format(exchange, key)) + return [tuple(bytes_to_str(val).split(self.sep)) for val in values] + + def _purge(self, queue): + with self.conn_or_acquire() as client: + with client.pipeline() as pipe: + for pri in PRIORITY_STEPS: + priq = self._q_for_pri(queue, pri) + pipe = pipe.llen(priq).delete(priq) + sizes = pipe.execute() + return sum(sizes[::2]) + + def close(self): + self._closing = True + self._disconnect_pools() + if not self.closed: + # remove from channel poller. 
+ self.connection.cycle.discard(self) + + # delete fanout bindings + for queue in self._fanout_queues: + if queue in self.auto_delete_queues: + self.queue_delete(queue) + + self._close_clients() + + super(Channel, self).close() + + def _close_clients(self): + # Close connections + for attr in 'client', 'subclient': + try: + self.__dict__[attr].connection.disconnect() + except (KeyError, AttributeError, self.ResponseError): + pass + + def _prepare_virtual_host(self, vhost): + if not isinstance(vhost, numbers.Integral): + if not vhost or vhost == '/': + vhost = DEFAULT_DB + elif vhost.startswith('/'): + vhost = vhost[1:] + try: + vhost = int(vhost) + except ValueError: + raise ValueError( + 'Database is int between 0 and limit - 1, not {0}'.format( + vhost, + )) + return vhost + + def _filter_tcp_connparams(self, socket_keepalive=None, + socket_keepalive_options=None, **params): + return params + + def _connparams(self, async=False, _r210_options=( + 'socket_connect_timeout', 'socket_keepalive', + 'socket_keepalive_options')): + conninfo = self.connection.client + connparams = { + 'host': conninfo.hostname or '127.0.0.1', + 'port': conninfo.port or DEFAULT_PORT, + 'virtual_host': conninfo.virtual_host, + 'password': conninfo.password, + 'max_connections': self.max_connections, + 'socket_timeout': self.socket_timeout, + 'socket_connect_timeout': self.socket_connect_timeout, + 'socket_keepalive': self.socket_keepalive, + 'socket_keepalive_options': self.socket_keepalive_options, + } + if redis.VERSION < (2, 10): + for param in _r210_options: + val = connparams.pop(param, None) + if val is not None: + raise VersionMismatch( + 'redis: {0!r} requires redis 2.10.0 or higher'.format( + param)) + host = connparams['host'] + if '://' in host: + scheme, _, _, _, password, path, query = _parse_url(host) + if scheme == 'socket': + connparams = self._filter_tcp_connparams(**connparams) + connparams.update({ + 'connection_class': redis.UnixDomainSocketConnection, + 'path': '/' + path, + 'password': password}, **query) + + connparams.pop('socket_connect_timeout', None) + connparams.pop('socket_keepalive', None) + connparams.pop('socket_keepalive_options', None) + + connparams.pop('host', None) + connparams.pop('port', None) + connparams['db'] = self._prepare_virtual_host( + connparams.pop('virtual_host', None)) + + channel = self + connection_cls = ( + connparams.get('connection_class') or + redis.Connection + ) + + if async: + class Connection(connection_cls): + def disconnect(self): + super(Connection, self).disconnect() + channel._on_connection_disconnect(self) + connparams['connection_class'] = Connection + + return connparams + + def _create_client(self, async=False): + if async: + return self.AsyncClient(connection_pool=self.async_pool) + return self.Client(connection_pool=self.pool) + + def _get_pool(self, async=False): + params = self._connparams(async=async) + self.keyprefix_fanout = self.keyprefix_fanout.format(db=params['db']) + return redis.ConnectionPool(**params) + + def _get_async_client(self): + if redis.VERSION < (2, 4, 4): + raise VersionMismatch( + 'Redis transport requires redis-py versions 2.4.4 or later. ' + 'You have {0.__version__}'.format(redis)) + + # AsyncRedis maintains a connection attribute on it's instance and + # uses that when executing commands + # This was added after redis-py was changed. 
+ class AsyncRedis(redis.Redis): # pragma: no cover + + def __init__(self, *args, **kwargs): + super(AsyncRedis, self).__init__(*args, **kwargs) + self.connection = self.connection_pool.get_connection('_') + + return AsyncRedis + + @contextmanager + def conn_or_acquire(self, client=None): + if client: + yield client + else: + yield self._create_client() + + @property + def pool(self): + if self._pool is None: + self._pool = self._get_pool() + return self._pool + + @property + def async_pool(self): + if self._async_pool is None: + self._async_pool = self._get_pool(async=True) + return self._async_pool + + @cached_property + def client(self): + """Client used to publish messages, BRPOP etc.""" + return self._create_client(async=True) + + @cached_property + def subclient(self): + """Pub/Sub connection used to consume fanout queues.""" + client = self._create_client(async=True) + pubsub = client.pubsub() + pool = pubsub.connection_pool + pubsub.connection = pool.get_connection('pubsub', pubsub.shard_hint) + return pubsub + + def _update_cycle(self): + """Update fair cycle between queues. + + We cycle between queues fairly to make sure that + each queue is equally likely to be consumed from, + so that a very busy queue will not block others. + + This works by using Redis's `BRPOP` command and + by rotating the most recently used queue to the + and of the list. See Kombu github issue #166 for + more discussion of this method. + + """ + self._queue_cycle = list(self.active_queues) + + def _consume_cycle(self): + """Get a fresh list of queues from the queue cycle.""" + active = len(self.active_queues) + return self._queue_cycle[0:active] + + def _rotate_cycle(self, used): + """Move most recently used queue to end of list.""" + cycle = self._queue_cycle + try: + cycle.append(cycle.pop(cycle.index(used))) + except ValueError: + pass + + def _get_response_error(self): + from redis import exceptions + return exceptions.ResponseError + + @property + def active_queues(self): + """Set of queues being consumed from (excluding fanout queues).""" + return set(queue for queue in self._active_queues + if queue not in self.active_fanout_queues) + + +class Transport(virtual.Transport): + Channel = Channel + + polling_interval = None # disable sleep between unsuccessful polls. + default_port = DEFAULT_PORT + supports_ev = True + driver_type = 'redis' + driver_name = 'redis' + + def __init__(self, *args, **kwargs): + if redis is None: + raise ImportError('Missing redis library (pip install redis)') + super(Transport, self).__init__(*args, **kwargs) + + # Get redis-py exceptions. + self.connection_errors, self.channel_errors = self._get_errors() + # All channels share the same poller. 
+ self.cycle = MultiChannelPoller() + + def driver_version(self): + return redis.__version__ + + def register_with_event_loop(self, connection, loop): + cycle = self.cycle + cycle.on_poll_init(loop.poller) + cycle_poll_start = cycle.on_poll_start + add_reader = loop.add_reader + on_readable = self.on_readable + + def _on_disconnect(connection): + if connection._sock: + loop.remove(connection._sock) + cycle._on_connection_disconnect = _on_disconnect + + def on_poll_start(): + cycle_poll_start() + [add_reader(fd, on_readable, fd) for fd in cycle.fds] + loop.on_tick.add(on_poll_start) + loop.call_repeatedly(10, cycle.maybe_restore_messages) + + def on_readable(self, fileno): + """Handle AIO event for one of our file descriptors.""" + item = self.cycle.on_readable(fileno) + if item: + message, queue = item + if not queue or queue not in self._callbacks: + raise KeyError( + 'Message for queue {0!r} without consumers: {1}'.format( + queue, message)) + self._callbacks[queue](message) + + def _get_errors(self): + """Utility to import redis-py's exceptions at runtime.""" + return get_redis_error_classes() diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/sqlalchemy/__init__.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/sqlalchemy/__init__.py new file mode 100644 index 0000000..3aab155 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/sqlalchemy/__init__.py @@ -0,0 +1,160 @@ +"""Kombu transport using SQLAlchemy as the message store.""" +# SQLAlchemy overrides != False to have special meaning and pep8 complains +# flake8: noqa + +from __future__ import absolute_import + +from anyjson import loads, dumps +from sqlalchemy import create_engine +from sqlalchemy.exc import OperationalError +from sqlalchemy.orm import sessionmaker + +from kombu.five import Empty +from kombu.transport import virtual +from kombu.utils import cached_property +from kombu.utils.encoding import bytes_to_str + +from .models import (ModelBase, Queue as QueueBase, Message as MessageBase, + class_registry, metadata) + + +VERSION = (1, 1, 0) +__version__ = '.'.join(map(str, VERSION)) + + +class Channel(virtual.Channel): + _session = None + _engines = {} # engine cache + + def __init__(self, connection, **kwargs): + self._configure_entity_tablenames(connection.client.transport_options) + super(Channel, self).__init__(connection, **kwargs) + + def _configure_entity_tablenames(self, opts): + self.queue_tablename = opts.get('queue_tablename', 'kombu_queue') + self.message_tablename = opts.get('message_tablename', 'kombu_message') + + # + # Define the model definitions. This registers the declarative + # classes with the active SQLAlchemy metadata object. This *must* be + # done prior to the ``create_engine`` call. 
+ # + self.queue_cls and self.message_cls + + def _engine_from_config(self): + conninfo = self.connection.client + transport_options = conninfo.transport_options.copy() + transport_options.pop('queue_tablename', None) + transport_options.pop('message_tablename', None) + return create_engine(conninfo.hostname, **transport_options) + + def _open(self): + conninfo = self.connection.client + if conninfo.hostname not in self._engines: + engine = self._engine_from_config() + Session = sessionmaker(bind=engine) + metadata.create_all(engine) + self._engines[conninfo.hostname] = engine, Session + return self._engines[conninfo.hostname] + + @property + def session(self): + if self._session is None: + _, Session = self._open() + self._session = Session() + return self._session + + def _get_or_create(self, queue): + obj = self.session.query(self.queue_cls) \ + .filter(self.queue_cls.name == queue).first() + if not obj: + obj = self.queue_cls(queue) + self.session.add(obj) + try: + self.session.commit() + except OperationalError: + self.session.rollback() + return obj + + def _new_queue(self, queue, **kwargs): + self._get_or_create(queue) + + def _put(self, queue, payload, **kwargs): + obj = self._get_or_create(queue) + message = self.message_cls(dumps(payload), obj) + self.session.add(message) + try: + self.session.commit() + except OperationalError: + self.session.rollback() + + def _get(self, queue): + obj = self._get_or_create(queue) + if self.session.bind.name == 'sqlite': + self.session.execute('BEGIN IMMEDIATE TRANSACTION') + try: + msg = self.session.query(self.message_cls) \ + .with_lockmode('update') \ + .filter(self.message_cls.queue_id == obj.id) \ + .filter(self.message_cls.visible != False) \ + .order_by(self.message_cls.sent_at) \ + .order_by(self.message_cls.id) \ + .limit(1) \ + .first() + if msg: + msg.visible = False + return loads(bytes_to_str(msg.payload)) + raise Empty() + finally: + self.session.commit() + + def _query_all(self, queue): + obj = self._get_or_create(queue) + return self.session.query(self.message_cls) \ + .filter(self.message_cls.queue_id == obj.id) + + def _purge(self, queue): + count = self._query_all(queue).delete(synchronize_session=False) + try: + self.session.commit() + except OperationalError: + self.session.rollback() + return count + + def _size(self, queue): + return self._query_all(queue).count() + + def _declarative_cls(self, name, base, ns): + if name in class_registry: + return class_registry[name] + return type(name, (base, ModelBase), ns) + + @cached_property + def queue_cls(self): + return self._declarative_cls( + 'Queue', + QueueBase, + {'__tablename__': self.queue_tablename} + ) + + @cached_property + def message_cls(self): + return self._declarative_cls( + 'Message', + MessageBase, + {'__tablename__': self.message_tablename} + ) + + +class Transport(virtual.Transport): + Channel = Channel + + can_parse_url = True + default_port = 0 + driver_type = 'sql' + driver_name = 'sqlalchemy' + connection_errors = (OperationalError, ) + + def driver_version(self): + import sqlalchemy + return sqlalchemy.__version__ diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/sqlalchemy/models.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/sqlalchemy/models.py new file mode 100644 index 0000000..4fa2bfe --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/sqlalchemy/models.py @@ -0,0 +1,62 @@ +from __future__ import absolute_import + +import datetime + +from sqlalchemy import (Column, Integer, String, Text, DateTime, + 
Sequence, Boolean, ForeignKey, SmallInteger)
+from sqlalchemy.orm import relation
+from sqlalchemy.ext.declarative import declarative_base, declared_attr
+from sqlalchemy.schema import MetaData
+
+class_registry = {}
+metadata = MetaData()
+ModelBase = declarative_base(metadata=metadata, class_registry=class_registry)
+
+
+class Queue(object):
+    __table_args__ = {'sqlite_autoincrement': True, 'mysql_engine': 'InnoDB'}
+
+    id = Column(Integer, Sequence('queue_id_sequence'), primary_key=True,
+                autoincrement=True)
+    name = Column(String(200), unique=True)
+
+    def __init__(self, name):
+        self.name = name
+
+    def __str__(self):
+        return '<Queue({self.name})>'.format(self=self)
+
+    @declared_attr
+    def messages(cls):
+        return relation('Message', backref='queue', lazy='noload')
+
+
+class Message(object):
+    __table_args__ = {'sqlite_autoincrement': True, 'mysql_engine': 'InnoDB'}
+
+    id = Column(Integer, Sequence('message_id_sequence'),
+                primary_key=True, autoincrement=True)
+    visible = Column(Boolean, default=True, index=True)
+    sent_at = Column('timestamp', DateTime, nullable=True, index=True,
+                     onupdate=datetime.datetime.now)
+    payload = Column(Text, nullable=False)
+    version = Column(SmallInteger, nullable=False, default=1)
+
+    __mapper_args__ = {'version_id_col': version}
+
+    def __init__(self, payload, queue):
+        self.payload = payload
+        self.queue = queue
+
+    def __str__(self):
+        return '<Message: {0.sent_at} {0.payload} {0.queue_id}>'.format(self)
+
+    @declared_attr
+    def queue_id(self):
+        return Column(
+            Integer,
+            ForeignKey(
+                '%s.id' % class_registry['Queue'].__tablename__,
+                name='FK_kombu_message_queue'
+            )
+        )
diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/virtual/__init__.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/virtual/__init__.py
new file mode 100644
index 0000000..ddcca47
--- /dev/null
+++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/virtual/__init__.py
@@ -0,0 +1,854 @@
+"""
+kombu.transport.virtual
+=======================
+
+Virtual transport implementation.
+
+Emulates the AMQ API for non-AMQ transports.
+
+"""
+from __future__ import absolute_import, unicode_literals
+
+import base64
+import socket
+import sys
+import warnings
+
+from array import array
+from itertools import count
+from multiprocessing.util import Finalize
+from time import sleep
+
+from amqp.protocol import queue_declare_ok_t
+
+from kombu.exceptions import ResourceError, ChannelError
+from kombu.five import Empty, items, monotonic
+from kombu.utils import emergency_dump_state, kwdict, say, uuid
+from kombu.utils.compat import OrderedDict
+from kombu.utils.encoding import str_to_bytes, bytes_to_str
+
+from kombu.transport import base
+
+from .scheduling import FairCycle
+from .exchange import STANDARD_EXCHANGE_TYPES
+
+ARRAY_TYPE_H = 'H' if sys.version_info[0] == 3 else b'H'
+
+UNDELIVERABLE_FMT = """\
+Message could not be delivered: No queues bound to exchange {exchange!r} \
+using binding key {routing_key!r}.
+""" + +NOT_EQUIVALENT_FMT = """\ +Cannot redeclare exchange {0!r} in vhost {1!r} with \ +different type, durable, autodelete or arguments value.\ +""" + + +class Base64(object): + + def encode(self, s): + return bytes_to_str(base64.b64encode(str_to_bytes(s))) + + def decode(self, s): + return base64.b64decode(str_to_bytes(s)) + + +class NotEquivalentError(Exception): + """Entity declaration is not equivalent to the previous declaration.""" + pass + + +class UndeliverableWarning(UserWarning): + """The message could not be delivered to a queue.""" + pass + + +class BrokerState(object): + + #: exchange declarations. + exchanges = None + + #: active bindings. + bindings = None + + def __init__(self, exchanges=None, bindings=None): + self.exchanges = {} if exchanges is None else exchanges + self.bindings = {} if bindings is None else bindings + + def clear(self): + self.exchanges.clear() + self.bindings.clear() + + +class QoS(object): + """Quality of Service guarantees. + + Only supports `prefetch_count` at this point. + + :param channel: AMQ Channel. + :keyword prefetch_count: Initial prefetch count (defaults to 0). + + """ + + #: current prefetch count value + prefetch_count = 0 + + #: :class:`~collections.OrderedDict` of active messages. + #: *NOTE*: Can only be modified by the consuming thread. + _delivered = None + + #: acks can be done by other threads than the consuming thread. + #: Instead of a mutex, which doesn't perform well here, we mark + #: the delivery tags as dirty, so subsequent calls to append() can remove + #: them. + _dirty = None + + #: If disabled, unacked messages won't be restored at shutdown. + restore_at_shutdown = True + + def __init__(self, channel, prefetch_count=0): + self.channel = channel + self.prefetch_count = prefetch_count or 0 + + self._delivered = OrderedDict() + self._delivered.restored = False + self._dirty = set() + self._quick_ack = self._dirty.add + self._quick_append = self._delivered.__setitem__ + self._on_collect = Finalize( + self, self.restore_unacked_once, exitpriority=1, + ) + + def can_consume(self): + """Return true if the channel can be consumed from. + + Used to ensure the client adhers to currently active + prefetch limits. + + """ + pcount = self.prefetch_count + return not pcount or len(self._delivered) - len(self._dirty) < pcount + + def can_consume_max_estimate(self): + """Returns the maximum number of messages allowed to be returned. + + Returns an estimated number of messages that a consumer may be allowed + to consume at once from the broker. This is used for services where + bulk 'get message' calls are preferred to many individual 'get message' + calls - like SQS. 
+
+        returns:
+            An integer > 0
+        """
+        pcount = self.prefetch_count
+        if pcount:
+            return max(pcount - (len(self._delivered) - len(self._dirty)), 0)
+
+    def append(self, message, delivery_tag):
+        """Append message to transactional state."""
+        if self._dirty:
+            self._flush()
+        self._quick_append(delivery_tag, message)
+
+    def get(self, delivery_tag):
+        return self._delivered[delivery_tag]
+
+    def _flush(self):
+        """Flush dirty (acked/rejected) tags from the delivered map."""
+        dirty = self._dirty
+        delivered = self._delivered
+        while 1:
+            try:
+                dirty_tag = dirty.pop()
+            except KeyError:
+                break
+            delivered.pop(dirty_tag, None)
+
+    def ack(self, delivery_tag):
+        """Acknowledge message and remove from transactional state."""
+        self._quick_ack(delivery_tag)
+
+    def reject(self, delivery_tag, requeue=False):
+        """Remove from transactional state and requeue message."""
+        if requeue:
+            self.channel._restore_at_beginning(self._delivered[delivery_tag])
+        self._quick_ack(delivery_tag)
+
+    def restore_unacked(self):
+        """Restore all unacknowledged messages."""
+        self._flush()
+        delivered = self._delivered
+        errors = []
+        restore = self.channel._restore
+        pop_message = delivered.popitem
+
+        while delivered:
+            try:
+                _, message = pop_message()
+            except KeyError:  # pragma: no cover
+                break
+
+            try:
+                restore(message)
+            except BaseException as exc:
+                errors.append((exc, message))
+        delivered.clear()
+        return errors
+
+    def restore_unacked_once(self):
+        """Restores all unacknowledged messages at shutdown/gc collect.
+
+        Will only be done once for each instance.
+
+        """
+        self._on_collect.cancel()
+        self._flush()
+        state = self._delivered
+
+        if not self.restore_at_shutdown or not self.channel.do_restore:
+            return
+        if getattr(state, 'restored', None):
+            assert not state
+            return
+        try:
+            if state:
+                say('Restoring {0!r} unacknowledged message(s).',
+                    len(self._delivered))
+                unrestored = self.restore_unacked()
+
+                if unrestored:
+                    errors, messages = list(zip(*unrestored))
+                    say('UNABLE TO RESTORE {0} MESSAGES: {1}',
+                        len(errors), errors)
+                    emergency_dump_state(messages)
+        finally:
+            state.restored = True
+
+    def restore_visible(self, *args, **kwargs):
+        """Restore any pending unacknowledged messages for visibility_timeout
+        style implementations.
+
+        Optional: Currently only used by the Redis transport.
+
+        """
+        pass
+
+
+class Message(base.Message):
+
+    def __init__(self, channel, payload, **kwargs):
+        self._raw = payload
+        properties = payload['properties']
+        body = payload.get('body')
+        if body:
+            body = channel.decode_body(body, properties.get('body_encoding'))
+        kwargs.update({
+            'body': body,
+            'delivery_tag': properties['delivery_tag'],
+            'content_type': payload.get('content-type'),
+            'content_encoding': payload.get('content-encoding'),
+            'headers': payload.get('headers'),
+            'properties': properties,
+            'delivery_info': properties.get('delivery_info'),
+            'postencode': 'utf-8',
+        })
+        super(Message, self).__init__(channel, **kwdict(kwargs))
+
+    def serializable(self):
+        props = self.properties
+        body, _ = self.channel.encode_body(self.body,
+                                           props.get('body_encoding'))
+        headers = dict(self.headers)
+        # remove compression header
+        headers.pop('compression', None)
+        return {
+            'body': body,
+            'properties': props,
+            'content-type': self.content_type,
+            'content-encoding': self.content_encoding,
+            'headers': headers,
+        }
+
+
+class AbstractChannel(object):
+    """This is an abstract class defining the channel methods
+    you'd usually want to implement in a virtual channel.
+ + Do not subclass directly, but rather inherit from :class:`Channel` + instead. + + """ + + def _get(self, queue, timeout=None): + """Get next message from `queue`.""" + raise NotImplementedError('Virtual channels must implement _get') + + def _put(self, queue, message): + """Put `message` onto `queue`.""" + raise NotImplementedError('Virtual channels must implement _put') + + def _purge(self, queue): + """Remove all messages from `queue`.""" + raise NotImplementedError('Virtual channels must implement _purge') + + def _size(self, queue): + """Return the number of messages in `queue` as an :class:`int`.""" + return 0 + + def _delete(self, queue, *args, **kwargs): + """Delete `queue`. + + This just purges the queue, if you need to do more you can + override this method. + + """ + self._purge(queue) + + def _new_queue(self, queue, **kwargs): + """Create new queue. + + Your transport can override this method if it needs + to do something whenever a new queue is declared. + + """ + pass + + def _has_queue(self, queue, **kwargs): + """Verify that queue exists. + + Should return :const:`True` if the queue exists or :const:`False` + otherwise. + + """ + return True + + def _poll(self, cycle, timeout=None): + """Poll a list of queues for available messages.""" + return cycle.get() + + +class Channel(AbstractChannel, base.StdChannel): + """Virtual channel. + + :param connection: The transport instance this channel is part of. + + """ + #: message class used. + Message = Message + + #: QoS class used. + QoS = QoS + + #: flag to restore unacked messages when channel + #: goes out of scope. + do_restore = True + + #: mapping of exchange types and corresponding classes. + exchange_types = dict(STANDARD_EXCHANGE_TYPES) + + #: flag set if the channel supports fanout exchanges. + supports_fanout = False + + #: Binary <-> ASCII codecs. + codecs = {'base64': Base64()} + + #: Default body encoding. + #: NOTE: ``transport_options['body_encoding']`` will override this value. + body_encoding = 'base64' + + #: counter used to generate delivery tags for this channel. + _delivery_tags = count(1) + + #: Optional queue where messages with no route is delivered. + #: Set by ``transport_options['deadletter_queue']``. + deadletter_queue = None + + # List of options to transfer from :attr:`transport_options`. 
+ from_transport_options = ('body_encoding', 'deadletter_queue') + + def __init__(self, connection, **kwargs): + self.connection = connection + self._consumers = set() + self._cycle = None + self._tag_to_queue = {} + self._active_queues = [] + self._qos = None + self.closed = False + + # instantiate exchange types + self.exchange_types = dict( + (typ, cls(self)) for typ, cls in items(self.exchange_types) + ) + + try: + self.channel_id = self.connection._avail_channel_ids.pop() + except IndexError: + raise ResourceError( + 'No free channel ids, current={0}, channel_max={1}'.format( + len(self.connection.channels), + self.connection.channel_max), (20, 10), + ) + + topts = self.connection.client.transport_options + for opt_name in self.from_transport_options: + try: + setattr(self, opt_name, topts[opt_name]) + except KeyError: + pass + + def exchange_declare(self, exchange=None, type='direct', durable=False, + auto_delete=False, arguments=None, + nowait=False, passive=False): + """Declare exchange.""" + type = type or 'direct' + exchange = exchange or 'amq.%s' % type + if passive: + if exchange not in self.state.exchanges: + raise ChannelError( + 'NOT_FOUND - no exchange {0!r} in vhost {1!r}'.format( + exchange, self.connection.client.virtual_host or '/'), + (50, 10), 'Channel.exchange_declare', '404', + ) + return + try: + prev = self.state.exchanges[exchange] + if not self.typeof(exchange).equivalent(prev, exchange, type, + durable, auto_delete, + arguments): + raise NotEquivalentError(NOT_EQUIVALENT_FMT.format( + exchange, self.connection.client.virtual_host or '/')) + except KeyError: + self.state.exchanges[exchange] = { + 'type': type, + 'durable': durable, + 'auto_delete': auto_delete, + 'arguments': arguments or {}, + 'table': [], + } + + def exchange_delete(self, exchange, if_unused=False, nowait=False): + """Delete `exchange` and all its bindings.""" + for rkey, _, queue in self.get_table(exchange): + self.queue_delete(queue, if_unused=True, if_empty=True) + self.state.exchanges.pop(exchange, None) + + def queue_declare(self, queue=None, passive=False, **kwargs): + """Declare queue.""" + queue = queue or 'amq.gen-%s' % uuid() + if passive and not self._has_queue(queue, **kwargs): + raise ChannelError( + 'NOT_FOUND - no queue {0!r} in vhost {1!r}'.format( + queue, self.connection.client.virtual_host or '/'), + (50, 10), 'Channel.queue_declare', '404', + ) + else: + self._new_queue(queue, **kwargs) + return queue_declare_ok_t(queue, self._size(queue), 0) + + def queue_delete(self, queue, if_unused=False, if_empty=False, **kwargs): + """Delete queue.""" + if if_empty and self._size(queue): + return + try: + exchange, routing_key, arguments = self.state.bindings[queue] + except KeyError: + return + meta = self.typeof(exchange).prepare_bind( + queue, exchange, routing_key, arguments, + ) + self._delete(queue, exchange, *meta) + self.state.bindings.pop(queue, None) + + def after_reply_message_received(self, queue): + self.queue_delete(queue) + + def exchange_bind(self, destination, source='', routing_key='', + nowait=False, arguments=None): + raise NotImplementedError('transport does not support exchange_bind') + + def exchange_unbind(self, destination, source='', routing_key='', + nowait=False, arguments=None): + raise NotImplementedError('transport does not support exchange_unbind') + + def queue_bind(self, queue, exchange=None, routing_key='', + arguments=None, **kwargs): + """Bind `queue` to `exchange` with `routing key`.""" + if queue in self.state.bindings: + return + exchange = 
exchange or 'amq.direct' + table = self.state.exchanges[exchange].setdefault('table', []) + self.state.bindings[queue] = exchange, routing_key, arguments + meta = self.typeof(exchange).prepare_bind( + queue, exchange, routing_key, arguments, + ) + table.append(meta) + if self.supports_fanout: + self._queue_bind(exchange, *meta) + + def queue_unbind(self, queue, exchange=None, routing_key='', + arguments=None, **kwargs): + raise NotImplementedError('transport does not support queue_unbind') + + def list_bindings(self): + return ((queue, exchange, rkey) + for exchange in self.state.exchanges + for rkey, pattern, queue in self.get_table(exchange)) + + def queue_purge(self, queue, **kwargs): + """Remove all ready messages from queue.""" + return self._purge(queue) + + def _next_delivery_tag(self): + return uuid() + + def basic_publish(self, message, exchange, routing_key, **kwargs): + """Publish message.""" + message['body'], body_encoding = self.encode_body( + message['body'], self.body_encoding, + ) + props = message['properties'] + props.update( + body_encoding=body_encoding, + delivery_tag=self._next_delivery_tag(), + ) + props['delivery_info'].update( + exchange=exchange, + routing_key=routing_key, + ) + if exchange: + return self.typeof(exchange).deliver( + message, exchange, routing_key, **kwargs + ) + # anon exchange: routing_key is the destination queue + return self._put(routing_key, message, **kwargs) + + def basic_consume(self, queue, no_ack, callback, consumer_tag, **kwargs): + """Consume from `queue`""" + self._tag_to_queue[consumer_tag] = queue + self._active_queues.append(queue) + + def _callback(raw_message): + message = self.Message(self, raw_message) + if not no_ack: + self.qos.append(message, message.delivery_tag) + return callback(message) + + self.connection._callbacks[queue] = _callback + self._consumers.add(consumer_tag) + + self._reset_cycle() + + def basic_cancel(self, consumer_tag): + """Cancel consumer by consumer tag.""" + if consumer_tag in self._consumers: + self._consumers.remove(consumer_tag) + self._reset_cycle() + queue = self._tag_to_queue.pop(consumer_tag, None) + try: + self._active_queues.remove(queue) + except ValueError: + pass + self.connection._callbacks.pop(queue, None) + + def basic_get(self, queue, no_ack=False, **kwargs): + """Get message by direct access (synchronous).""" + try: + message = self.Message(self, self._get(queue)) + if not no_ack: + self.qos.append(message, message.delivery_tag) + return message + except Empty: + pass + + def basic_ack(self, delivery_tag): + """Acknowledge message.""" + self.qos.ack(delivery_tag) + + def basic_recover(self, requeue=False): + """Recover unacked messages.""" + if requeue: + return self.qos.restore_unacked() + raise NotImplementedError('Does not support recover(requeue=False)') + + def basic_reject(self, delivery_tag, requeue=False): + """Reject message.""" + self.qos.reject(delivery_tag, requeue=requeue) + + def basic_qos(self, prefetch_size=0, prefetch_count=0, + apply_global=False): + """Change QoS settings for this channel. + + Only `prefetch_count` is supported. 
+ + """ + self.qos.prefetch_count = prefetch_count + + def get_exchanges(self): + return list(self.state.exchanges) + + def get_table(self, exchange): + """Get table of bindings for `exchange`.""" + return self.state.exchanges[exchange]['table'] + + def typeof(self, exchange, default='direct'): + """Get the exchange type instance for `exchange`.""" + try: + type = self.state.exchanges[exchange]['type'] + except KeyError: + type = default + return self.exchange_types[type] + + def _lookup(self, exchange, routing_key, default=None): + """Find all queues matching `routing_key` for the given `exchange`. + + Must return the string `default` if no queues matched. + + """ + if default is None: + default = self.deadletter_queue + try: + R = self.typeof(exchange).lookup( + self.get_table(exchange), + exchange, routing_key, default, + ) + except KeyError: + R = [] + + if not R and default is not None: + warnings.warn(UndeliverableWarning(UNDELIVERABLE_FMT.format( + exchange=exchange, routing_key=routing_key)), + ) + self._new_queue(default) + R = [default] + return R + + def _restore(self, message): + """Redeliver message to its original destination.""" + delivery_info = message.delivery_info + message = message.serializable() + message['redelivered'] = True + for queue in self._lookup( + delivery_info['exchange'], delivery_info['routing_key']): + self._put(queue, message) + + def _restore_at_beginning(self, message): + return self._restore(message) + + def drain_events(self, timeout=None): + if self._consumers and self.qos.can_consume(): + if hasattr(self, '_get_many'): + return self._get_many(self._active_queues, timeout=timeout) + return self._poll(self.cycle, timeout=timeout) + raise Empty() + + def message_to_python(self, raw_message): + """Convert raw message to :class:`Message` instance.""" + if not isinstance(raw_message, self.Message): + return self.Message(self, payload=raw_message) + return raw_message + + def prepare_message(self, body, priority=None, content_type=None, + content_encoding=None, headers=None, properties=None): + """Prepare message data.""" + properties = properties or {} + info = properties.setdefault('delivery_info', {}) + info['priority'] = priority or 0 + + return {'body': body, + 'content-encoding': content_encoding, + 'content-type': content_type, + 'headers': headers or {}, + 'properties': properties or {}} + + def flow(self, active=True): + """Enable/disable message flow. + + :raises NotImplementedError: as flow + is not implemented by the base virtual implementation. 
+ + """ + raise NotImplementedError('virtual channels do not support flow.') + + def close(self): + """Close channel, cancel all consumers, and requeue unacked + messages.""" + if not self.closed: + self.closed = True + for consumer in list(self._consumers): + self.basic_cancel(consumer) + if self._qos: + self._qos.restore_unacked_once() + if self._cycle is not None: + self._cycle.close() + self._cycle = None + if self.connection is not None: + self.connection.close_channel(self) + self.exchange_types = None + + def encode_body(self, body, encoding=None): + if encoding: + return self.codecs.get(encoding).encode(body), encoding + return body, encoding + + def decode_body(self, body, encoding=None): + if encoding: + return self.codecs.get(encoding).decode(body) + return body + + def _reset_cycle(self): + self._cycle = FairCycle(self._get, self._active_queues, Empty) + + def __enter__(self): + return self + + def __exit__(self, *exc_info): + self.close() + + @property + def state(self): + """Broker state containing exchanges and bindings.""" + return self.connection.state + + @property + def qos(self): + """:class:`QoS` manager for this channel.""" + if self._qos is None: + self._qos = self.QoS(self) + return self._qos + + @property + def cycle(self): + if self._cycle is None: + self._reset_cycle() + return self._cycle + + +class Management(base.Management): + + def __init__(self, transport): + super(Management, self).__init__(transport) + self.channel = transport.client.channel() + + def get_bindings(self): + return [dict(destination=q, source=e, routing_key=r) + for q, e, r in self.channel.list_bindings()] + + def close(self): + self.channel.close() + + +class Transport(base.Transport): + """Virtual transport. + + :param client: :class:`~kombu.Connection` instance + + """ + Channel = Channel + Cycle = FairCycle + Management = Management + + #: :class:`BrokerState` containing declared exchanges and + #: bindings (set by constructor). + state = BrokerState() + + #: :class:`~kombu.transport.virtual.scheduling.FairCycle` instance + #: used to fairly drain events from channels (set by constructor). + cycle = None + + #: port number used when no port is specified. + default_port = None + + #: active channels. + channels = None + + #: queue/callback map. + _callbacks = None + + #: Time to sleep between unsuccessful polls. + polling_interval = 1.0 + + #: Max number of channels + channel_max = 65535 + + def __init__(self, client, **kwargs): + self.client = client + self.channels = [] + self._avail_channels = [] + self._callbacks = {} + self.cycle = self.Cycle(self._drain_channel, self.channels, Empty) + polling_interval = client.transport_options.get('polling_interval') + if polling_interval is not None: + self.polling_interval = polling_interval + self._avail_channel_ids = array( + ARRAY_TYPE_H, range(self.channel_max, 0, -1), + ) + + def create_channel(self, connection): + try: + return self._avail_channels.pop() + except IndexError: + channel = self.Channel(connection) + self.channels.append(channel) + return channel + + def close_channel(self, channel): + try: + self._avail_channel_ids.append(channel.channel_id) + try: + self.channels.remove(channel) + except ValueError: + pass + finally: + channel.connection = None + + def establish_connection(self): + # creates channel to verify connection. + # this channel is then used as the next requested channel. + # (returned by ``create_channel``). 
+ self._avail_channels.append(self.create_channel(self)) + return self # for drain events + + def close_connection(self, connection): + self.cycle.close() + for l in self._avail_channels, self.channels: + while l: + try: + channel = l.pop() + except (IndexError, KeyError): # pragma: no cover + pass + else: + channel.close() + + def drain_events(self, connection, timeout=None): + loop = 0 + time_start = monotonic() + get = self.cycle.get + polling_interval = self.polling_interval + while 1: + try: + item, channel = get(timeout=timeout) + except Empty: + if timeout and monotonic() - time_start >= timeout: + raise socket.timeout() + loop += 1 + if polling_interval is not None: + sleep(polling_interval) + else: + break + + message, queue = item + + if not queue or queue not in self._callbacks: + raise KeyError( + 'Message for queue {0!r} without consumers: {1}'.format( + queue, message)) + + self._callbacks[queue](message) + + def _drain_channel(self, channel, timeout=None): + return channel.drain_events(timeout=timeout) + + @property + def default_connection_params(self): + return {'port': self.default_port, 'hostname': 'localhost'} diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/virtual/exchange.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/virtual/exchange.py new file mode 100644 index 0000000..c788a26 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/virtual/exchange.py @@ -0,0 +1,134 @@ +""" +kombu.transport.virtual.exchange +================================ + +Implementations of the standard exchanges defined +by the AMQ protocol (excluding the `headers` exchange). + +""" +from __future__ import absolute_import + +from kombu.utils import escape_regex + +import re + + +class ExchangeType(object): + """Implements the specifics for an exchange type. + + :param channel: AMQ Channel + + """ + type = None + + def __init__(self, channel): + self.channel = channel + + def lookup(self, table, exchange, routing_key, default): + """Lookup all queues matching `routing_key` in `exchange`. + + :returns: `default` if no queues matched. 
+ + """ + raise NotImplementedError('subclass responsibility') + + def prepare_bind(self, queue, exchange, routing_key, arguments): + """Return tuple of `(routing_key, regex, queue)` to be stored + for bindings to this exchange.""" + return routing_key, None, queue + + def equivalent(self, prev, exchange, type, + durable, auto_delete, arguments): + """Return true if `prev` and `exchange` is equivalent.""" + return (type == prev['type'] and + durable == prev['durable'] and + auto_delete == prev['auto_delete'] and + (arguments or {}) == (prev['arguments'] or {})) + + +class DirectExchange(ExchangeType): + """The `direct` exchange routes based on exact routing keys.""" + type = 'direct' + + def lookup(self, table, exchange, routing_key, default): + return [queue for rkey, _, queue in table + if rkey == routing_key] + + def deliver(self, message, exchange, routing_key, **kwargs): + _lookup = self.channel._lookup + _put = self.channel._put + for queue in _lookup(exchange, routing_key): + _put(queue, message, **kwargs) + + +class TopicExchange(ExchangeType): + """The `topic` exchange routes messages based on words separated by + dots, using wildcard characters ``*`` (any single word), and ``#`` + (one or more words).""" + type = 'topic' + + #: map of wildcard to regex conversions + wildcards = {'*': r'.*?[^\.]', + '#': r'.*?'} + + #: compiled regex cache + _compiled = {} + + def lookup(self, table, exchange, routing_key, default): + return [queue for rkey, pattern, queue in table + if self._match(pattern, routing_key)] + + def deliver(self, message, exchange, routing_key, **kwargs): + _lookup = self.channel._lookup + _put = self.channel._put + deadletter = self.channel.deadletter_queue + for queue in [q for q in _lookup(exchange, routing_key) + if q and q != deadletter]: + _put(queue, message, **kwargs) + + def prepare_bind(self, queue, exchange, routing_key, arguments): + return routing_key, self.key_to_pattern(routing_key), queue + + def key_to_pattern(self, rkey): + """Get the corresponding regex for any routing key.""" + return '^%s$' % ('\.'.join( + self.wildcards.get(word, word) + for word in escape_regex(rkey, '.#*').split('.') + )) + + def _match(self, pattern, string): + """Same as :func:`re.match`, except the regex is compiled and cached, + then reused on subsequent matches with the same pattern.""" + try: + compiled = self._compiled[pattern] + except KeyError: + compiled = self._compiled[pattern] = re.compile(pattern, re.U) + return compiled.match(string) + + +class FanoutExchange(ExchangeType): + """The `fanout` exchange implements broadcast messaging by delivering + copies of all messages to all queues bound to the exchange. + + To support fanout the virtual channel needs to store the table + as shared state. This requires that the `Channel.supports_fanout` + attribute is set to true, and the `Channel._queue_bind` and + `Channel.get_table` methods are implemented. See the redis backend + for an example implementation of these methods. + + """ + type = 'fanout' + + def lookup(self, table, exchange, routing_key, default): + return [queue for _, _, queue in table] + + def deliver(self, message, exchange, routing_key, **kwargs): + if self.channel.supports_fanout: + self.channel._put_fanout( + exchange, message, routing_key, **kwargs) + + +#: Map of standard exchange types and corresponding classes. 
+STANDARD_EXCHANGE_TYPES = {'direct': DirectExchange, + 'topic': TopicExchange, + 'fanout': FanoutExchange} diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/virtual/scheduling.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/virtual/scheduling.py new file mode 100644 index 0000000..bf92a3a --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/virtual/scheduling.py @@ -0,0 +1,49 @@ +""" + kombu.transport.virtual.scheduling + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Consumer utilities. + +""" +from __future__ import absolute_import + +from itertools import count + + +class FairCycle(object): + """Consume from a set of resources, where each resource gets + an equal chance to be consumed from.""" + + def __init__(self, fun, resources, predicate=Exception): + self.fun = fun + self.resources = resources + self.predicate = predicate + self.pos = 0 + + def _next(self): + while 1: + try: + resource = self.resources[self.pos] + self.pos += 1 + return resource + except IndexError: + self.pos = 0 + if not self.resources: + raise self.predicate() + + def get(self, **kwargs): + for tried in count(0): # for infinity + resource = self._next() + + try: + return self.fun(resource, **kwargs), resource + except self.predicate: + if tried >= len(self.resources) - 1: + raise + + def close(self): + pass + + def __repr__(self): + return ''.format( + self=self, size=len(self.resources)) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/zmq.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/zmq.py new file mode 100644 index 0000000..e6b8a48 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/zmq.py @@ -0,0 +1,314 @@ +""" +kombu.transport.zmq +=================== + +ZeroMQ transport. + +""" +from __future__ import absolute_import + +import errno +import os +import socket + +try: + import zmq + from zmq import ZMQError +except ImportError: + zmq = ZMQError = None # noqa + +from kombu.five import Empty +from kombu.log import get_logger +from kombu.serialization import pickle +from kombu.utils import cached_property +from kombu.utils.eventio import poll, READ + +from . 
import virtual + +logger = get_logger('kombu.transport.zmq') + +DEFAULT_PORT = 5555 +DEFAULT_HWM = 128 +DEFAULT_INCR = 1 + +dumps, loads = pickle.dumps, pickle.loads + + +class MultiChannelPoller(object): + eventflags = READ + + def __init__(self): + # active channels + self._channels = set() + # file descriptor -> channel map + self._fd_to_chan = {} + # poll implementation (epoll/kqueue/select) + self.poller = poll() + + def close(self): + for fd in self._fd_to_chan: + try: + self.poller.unregister(fd) + except KeyError: + pass + self._channels.clear() + self._fd_to_chan.clear() + self.poller = None + + def add(self, channel): + self._channels.add(channel) + + def discard(self, channel): + self._channels.discard(channel) + self._fd_to_chan.pop(channel.client.connection.fd, None) + + def _register(self, channel): + conn = channel.client.connection + self._fd_to_chan[conn.fd] = channel + self.poller.register(conn.fd, self.eventflags) + + def on_poll_start(self): + for channel in self._channels: + self._register(channel) + + def on_readable(self, fileno): + chan = self._fd_to_chan[fileno] + return chan.drain_events(), chan + + def get(self, timeout=None): + self.on_poll_start() + + events = self.poller.poll(timeout) + for fileno, _ in events or []: + return self.on_readable(fileno) + + raise Empty() + + @property + def fds(self): + return self._fd_to_chan + + +class Client(object): + + def __init__(self, uri='tcp://127.0.0.1', port=DEFAULT_PORT, + hwm=DEFAULT_HWM, swap_size=None, enable_sink=True, + context=None): + try: + scheme, parts = uri.split('://') + except ValueError: + scheme = 'tcp' + parts = uri + endpoints = parts.split(';') + self.port = port + + if scheme != 'tcp': + raise NotImplementedError('Currently only TCP can be used') + + self.context = context or zmq.Context.instance() + + if enable_sink: + self.sink = self.context.socket(zmq.PULL) + self.sink.bind('tcp://*:{0.port}'.format(self)) + else: + self.sink = None + + self.vent = self.context.socket(zmq.PUSH) + + if hasattr(zmq, 'SNDHWM'): + self.vent.setsockopt(zmq.SNDHWM, hwm) + else: + self.vent.setsockopt(zmq.HWM, hwm) + + if swap_size: + self.vent.setsockopt(zmq.SWAP, swap_size) + + for endpoint in endpoints: + if scheme == 'tcp' and ':' not in endpoint: + endpoint += ':' + str(DEFAULT_PORT) + + endpoint = ''.join([scheme, '://', endpoint]) + + self.connect(endpoint) + + def connect(self, endpoint): + self.vent.connect(endpoint) + + def get(self, queue=None, timeout=None): + sink = self.sink + try: + if timeout is not None: + prev_timeout, sink.RCVTIMEO = sink.RCVTIMEO, timeout + try: + return sink.recv() + finally: + sink.RCVTIMEO = prev_timeout + else: + return sink.recv() + except ZMQError as exc: + if exc.errno == zmq.EAGAIN: + raise socket.error(errno.EAGAIN, exc.strerror) + else: + raise + + def put(self, queue, message, **kwargs): + return self.vent.send(message) + + def close(self): + if self.sink and not self.sink.closed: + self.sink.close() + if not self.vent.closed: + self.vent.close() + + @property + def connection(self): + if self.sink: + return self.sink + return self.vent + + +class Channel(virtual.Channel): + Client = Client + + hwm = DEFAULT_HWM + swap_size = None + enable_sink = True + port_incr = DEFAULT_INCR + + from_transport_options = ( + virtual.Channel.from_transport_options + + ('hwm', 'swap_size', 'enable_sink', 'port_incr') + ) + + def __init__(self, *args, **kwargs): + super_ = super(Channel, self) + super_.__init__(*args, **kwargs) + + # Evaluate socket + self.client.connection.closed + + 
self.connection.cycle.add(self) + self.connection_errors = self.connection.connection_errors + + def _get(self, queue, timeout=None): + try: + return loads(self.client.get(queue, timeout)) + except socket.error as exc: + if exc.errno == errno.EAGAIN and timeout != 0: + raise Empty() + else: + raise + + def _put(self, queue, message, **kwargs): + self.client.put(queue, dumps(message, -1), **kwargs) + + def _purge(self, queue): + return 0 + + def _poll(self, cycle, timeout=None): + return cycle.get(timeout=timeout) + + def close(self): + if not self.closed: + self.connection.cycle.discard(self) + try: + self.__dict__['client'].close() + except KeyError: + pass + super(Channel, self).close() + + def _prepare_port(self, port): + return (port + self.channel_id - 1) * self.port_incr + + def _create_client(self): + conninfo = self.connection.client + port = self._prepare_port(conninfo.port or DEFAULT_PORT) + return self.Client(uri=conninfo.hostname or 'tcp://127.0.0.1', + port=port, + hwm=self.hwm, + swap_size=self.swap_size, + enable_sink=self.enable_sink, + context=self.connection.context) + + @cached_property + def client(self): + return self._create_client() + + +class Transport(virtual.Transport): + Channel = Channel + + can_parse_url = True + default_port = DEFAULT_PORT + driver_type = 'zeromq' + driver_name = 'zmq' + + connection_errors = virtual.Transport.connection_errors + (ZMQError, ) + + supports_ev = True + polling_interval = None + + def __init__(self, *args, **kwargs): + if zmq is None: + raise ImportError('The zmq library is not installed') + super(Transport, self).__init__(*args, **kwargs) + self.cycle = MultiChannelPoller() + + def driver_version(self): + return zmq.__version__ + + def register_with_event_loop(self, connection, loop): + cycle = self.cycle + cycle.poller = loop.poller + add_reader = loop.add_reader + on_readable = self.on_readable + + cycle_poll_start = cycle.on_poll_start + + def on_poll_start(): + cycle_poll_start() + [add_reader(fd, on_readable, fd) for fd in cycle.fds] + + loop.on_tick.add(on_poll_start) + + def on_readable(self, fileno): + self._handle_event(self.cycle.on_readable(fileno)) + + def drain_events(self, connection, timeout=None): + more_to_read = False + for channel in connection.channels: + try: + evt = channel.cycle.get(timeout=timeout) + except socket.error as exc: + if exc.errno == errno.EAGAIN: + continue + raise + else: + connection._handle_event((evt, channel)) + more_to_read = True + if not more_to_read: + raise socket.error(errno.EAGAIN, os.strerror(errno.EAGAIN)) + + def _handle_event(self, evt): + item, channel = evt + message, queue = item + if not queue or queue not in self._callbacks: + raise KeyError( + 'Message for queue {0!r} without consumers: {1}'.format( + queue, message)) + self._callbacks[queue](message) + + def establish_connection(self): + self.context.closed + return super(Transport, self).establish_connection() + + def close_connection(self, connection): + super(Transport, self).close_connection(connection) + try: + connection.__dict__['context'].term() + except KeyError: + pass + + @cached_property + def context(self): + return zmq.Context(1) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/transport/zookeeper.py b/thesisenv/lib/python3.6/site-packages/kombu/transport/zookeeper.py new file mode 100644 index 0000000..794b8d9 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/transport/zookeeper.py @@ -0,0 +1,188 @@ +""" +kombu.transport.zookeeper +========================= + +Zookeeper transport. 
+ +:copyright: (c) 2010 - 2013 by Mahendra M. +:license: BSD, see LICENSE for more details. + +**Synopsis** + +Connects to a zookeeper node as :/ +The becomes the base for all the other znodes. So we can use +it like a vhost. + +This uses the built-in kazoo recipe for queues + +**References** + +- https://zookeeper.apache.org/doc/trunk/recipes.html#sc_recipes_Queues +- https://kazoo.readthedocs.io/en/latest/api/recipe/queue.html + +**Limitations** +This queue does not offer reliable consumption. An entry is removed from +the queue prior to being processed. So if an error occurs, the consumer +has to re-queue the item or it will be lost. +""" +from __future__ import absolute_import + +import os +import socket + +from anyjson import loads, dumps + +from kombu.five import Empty +from kombu.utils.encoding import bytes_to_str + +from . import virtual + +MAX_PRIORITY = 9 + +try: + import kazoo + from kazoo.client import KazooClient + from kazoo.recipe.queue import Queue + + KZ_CONNECTION_ERRORS = ( + kazoo.exceptions.SystemErrorException, + kazoo.exceptions.ConnectionLossException, + kazoo.exceptions.MarshallingErrorException, + kazoo.exceptions.UnimplementedException, + kazoo.exceptions.OperationTimeoutException, + kazoo.exceptions.NoAuthException, + kazoo.exceptions.InvalidACLException, + kazoo.exceptions.AuthFailedException, + kazoo.exceptions.SessionExpiredException, + ) + + KZ_CHANNEL_ERRORS = ( + kazoo.exceptions.RuntimeInconsistencyException, + kazoo.exceptions.DataInconsistencyException, + kazoo.exceptions.BadArgumentsException, + kazoo.exceptions.MarshallingErrorException, + kazoo.exceptions.UnimplementedException, + kazoo.exceptions.OperationTimeoutException, + kazoo.exceptions.ApiErrorException, + kazoo.exceptions.NoNodeException, + kazoo.exceptions.NoAuthException, + kazoo.exceptions.NodeExistsException, + kazoo.exceptions.NoChildrenForEphemeralsException, + kazoo.exceptions.NotEmptyException, + kazoo.exceptions.SessionExpiredException, + kazoo.exceptions.InvalidCallbackException, + socket.error, + ) +except ImportError: + kazoo = None # noqa + KZ_CONNECTION_ERRORS = KZ_CHANNEL_ERRORS = () # noqa + +DEFAULT_PORT = 2181 + +__author__ = 'Mahendra M ' + + +class Channel(virtual.Channel): + + _client = None + _queues = {} + + def _get_path(self, queue_name): + return os.path.join(self.vhost, queue_name) + + def _get_queue(self, queue_name): + queue = self._queues.get(queue_name, None) + + if queue is None: + queue = Queue(self.client, self._get_path(queue_name)) + self._queues[queue_name] = queue + + # Ensure that the queue is created + len(queue) + + return queue + + def _put(self, queue, message, **kwargs): + try: + priority = message['properties']['delivery_info']['priority'] + except KeyError: + priority = 0 + + queue = self._get_queue(queue) + queue.put(dumps(message), priority=(MAX_PRIORITY - priority)) + + def _get(self, queue): + queue = self._get_queue(queue) + msg = queue.get() + + if msg is None: + raise Empty() + + return loads(bytes_to_str(msg)) + + def _purge(self, queue): + count = 0 + queue = self._get_queue(queue) + + while True: + msg = queue.get() + if msg is None: + break + count += 1 + + return count + + def _delete(self, queue, *args, **kwargs): + if self._has_queue(queue): + self._purge(queue) + self.client.delete(self._get_path(queue)) + + def _size(self, queue): + queue = self._get_queue(queue) + return len(queue) + + def _new_queue(self, queue, **kwargs): + if not self._has_queue(queue): + queue = self._get_queue(queue) + + def _has_queue(self, queue): + return 
self.client.exists(self._get_path(queue)) is not None + + def _open(self): + conninfo = self.connection.client + port = conninfo.port or DEFAULT_PORT + conn_str = '%s:%s' % (conninfo.hostname, port) + self.vhost = os.path.join('/', conninfo.virtual_host[0:-1]) + + conn = KazooClient(conn_str) + conn.start() + return conn + + @property + def client(self): + if self._client is None: + self._client = self._open() + return self._client + + +class Transport(virtual.Transport): + Channel = Channel + polling_interval = 1 + default_port = DEFAULT_PORT + connection_errors = ( + virtual.Transport.connection_errors + KZ_CONNECTION_ERRORS + ) + channel_errors = ( + virtual.Transport.channel_errors + KZ_CHANNEL_ERRORS + ) + driver_type = 'zookeeper' + driver_name = 'kazoo' + + def __init__(self, *args, **kwargs): + if kazoo is None: + raise ImportError('The kazoo library is not installed') + + super(Transport, self).__init__(*args, **kwargs) + + def driver_version(self): + return kazoo.__version__ diff --git a/thesisenv/lib/python3.6/site-packages/kombu/utils/__init__.py b/thesisenv/lib/python3.6/site-packages/kombu/utils/__init__.py new file mode 100644 index 0000000..76779b0 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/utils/__init__.py @@ -0,0 +1,453 @@ +""" +kombu.utils +=========== + +Internal utilities. + +""" +from __future__ import absolute_import, print_function + +import importlib +import numbers +import random +import sys + +from contextlib import contextmanager +from itertools import count, repeat +from functools import wraps +from time import sleep +from uuid import UUID, uuid4 +try: + from uuid import _uuid_generate_random +except ImportError: + _uuid_generate_random = None + +from kombu.five import items, reraise, string_t + +from .encoding import default_encode, safe_repr as _safe_repr + +try: + import ctypes +except: + ctypes = None # noqa + +try: + from io import UnsupportedOperation + FILENO_ERRORS = (AttributeError, ValueError, UnsupportedOperation) +except ImportError: # pragma: no cover + # Py2 + FILENO_ERRORS = (AttributeError, ValueError) # noqa + + +__all__ = ['EqualityDict', 'say', 'uuid', 'kwdict', 'maybe_list', + 'fxrange', 'fxrangemax', 'retry_over_time', + 'emergency_dump_state', 'cached_property', + 'reprkwargs', 'reprcall', 'nested', 'fileno', 'maybe_fileno'] + + +def symbol_by_name(name, aliases={}, imp=None, package=None, + sep='.', default=None, **kwargs): + """Get symbol by qualified name. + + The name should be the full dot-separated path to the class:: + + modulename.ClassName + + Example:: + + celery.concurrency.processes.TaskPool + ^- class name + + or using ':' to separate module and symbol:: + + celery.concurrency.processes:TaskPool + + If `aliases` is provided, a dict containing short name/long name + mappings, the name is looked up in the aliases first. + + Examples: + + >>> symbol_by_name('celery.concurrency.processes.TaskPool') + + + >>> symbol_by_name('default', { + ... 'default': 'celery.concurrency.processes.TaskPool'}) + + + # Does not try to look up non-string names. 
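For context, the zookeeper transport above is selected purely by the connection URL; the path component becomes the base znode that acts like a vhost. A hypothetical usage sketch, assuming a ZooKeeper server on localhost:2181 and the kazoo library installed (queue name and payload are illustrative):

    from kombu import Connection

    # Sketch only: SimpleQueue drives the Channel._put/_get methods above.
    # Keep in mind the limitation noted in the module docstring: entries are
    # removed from the znode queue before they are processed.
    with Connection('zookeeper://localhost:2181/') as connection:
        queue = connection.SimpleQueue('tasks')
        queue.put({'hello': 'world'})
        message = queue.get(block=True, timeout=10)
        print(message.payload)   # -> {'hello': 'world'}
        message.ack()
        queue.close()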
+ >>> from celery.concurrency.processes import TaskPool + >>> symbol_by_name(TaskPool) is TaskPool + True + + """ + if imp is None: + imp = importlib.import_module + + if not isinstance(name, string_t): + return name # already a class + + name = aliases.get(name) or name + sep = ':' if ':' in name else sep + module_name, _, cls_name = name.rpartition(sep) + if not module_name: + cls_name, module_name = None, package if package else cls_name + try: + try: + module = imp(module_name, package=package, **kwargs) + except ValueError as exc: + reraise(ValueError, + ValueError("Couldn't import {0!r}: {1}".format(name, exc)), + sys.exc_info()[2]) + return getattr(module, cls_name) if cls_name else module + except (ImportError, AttributeError): + if default is None: + raise + return default + + +class HashedSeq(list): + """type used for hash() to make sure the hash is not generated + multiple times.""" + __slots__ = 'hashvalue' + + def __init__(self, *seq): + self[:] = seq + self.hashvalue = hash(seq) + + def __hash__(self): + return self.hashvalue + + +def eqhash(o): + try: + return o.__eqhash__() + except AttributeError: + return hash(o) + + +class EqualityDict(dict): + + def __getitem__(self, key): + h = eqhash(key) + if h not in self: + return self.__missing__(key) + return dict.__getitem__(self, h) + + def __setitem__(self, key, value): + return dict.__setitem__(self, eqhash(key), value) + + def __delitem__(self, key): + return dict.__delitem__(self, eqhash(key)) + + +def say(m, *fargs, **fkwargs): + print(str(m).format(*fargs, **fkwargs), file=sys.stderr) + + +if ctypes and _uuid_generate_random: # pragma: no cover + def uuid4(): + # Workaround for http://bugs.python.org/issue4607 + buffer = ctypes.create_string_buffer(16) + _uuid_generate_random(buffer) + return UUID(bytes=buffer.raw) + + +def uuid(): + """Generate a unique id, having - hopefully - a very small chance of + collision. + + For now this is provided by :func:`uuid.uuid4`. + """ + return str(uuid4()) +gen_unique_id = uuid + + +if sys.version_info >= (2, 6, 5): + + def kwdict(kwargs): + return kwargs +else: + def kwdict(kwargs): # pragma: no cover # noqa + """Make sure keyword arguments are not in Unicode. + + This should be fixed in newer Python versions, + see: http://bugs.python.org/issue4978. + + """ + return dict((key.encode('utf-8'), value) + for key, value in items(kwargs)) + + +def maybe_list(v): + if v is None: + return [] + if hasattr(v, '__iter__'): + return v + return [v] + + +def fxrange(start=1.0, stop=None, step=1.0, repeatlast=False): + cur = start * 1.0 + while 1: + if not stop or cur <= stop: + yield cur + cur += step + else: + if not repeatlast: + break + yield cur - step + + +def fxrangemax(start=1.0, stop=None, step=1.0, max=100.0): + sum_, cur = 0, start * 1.0 + while 1: + if sum_ >= max: + break + yield cur + if stop: + cur = min(cur + step, stop) + else: + cur += step + sum_ += cur + + +def retry_over_time(fun, catch, args=[], kwargs={}, errback=None, + max_retries=None, interval_start=2, interval_step=2, + interval_max=30, callback=None): + """Retry the function over and over until max retries is exceeded. + + For each retry we sleep a for a while before we try again, this interval + is increased for every retry until the max seconds is reached. + + :param fun: The function to try + :param catch: Exceptions to catch, can be either tuple or a single + exception class. + :keyword args: Positional arguments passed on to the function. + :keyword kwargs: Keyword arguments passed on to the function. 
+ :keyword errback: Callback for when an exception in ``catch`` is raised. + The callback must take two arguments: ``exc`` and ``interval``, where + ``exc`` is the exception instance, and ``interval`` is the time in + seconds to sleep next.. + :keyword max_retries: Maximum number of retries before we give up. + If this is not set, we will retry forever. + :keyword interval_start: How long (in seconds) we start sleeping between + retries. + :keyword interval_step: By how much the interval is increased for each + retry. + :keyword interval_max: Maximum number of seconds to sleep between retries. + + """ + retries = 0 + interval_range = fxrange(interval_start, + interval_max + interval_start, + interval_step, repeatlast=True) + for retries in count(): + try: + return fun(*args, **kwargs) + except catch as exc: + if max_retries and retries >= max_retries: + raise + if callback: + callback() + tts = float(errback(exc, interval_range, retries) if errback + else next(interval_range)) + if tts: + for _ in range(int(tts)): + if callback: + callback() + sleep(1.0) + # sleep remainder after int truncation above. + sleep(abs(int(tts) - tts)) + + +def emergency_dump_state(state, open_file=open, dump=None): + from pprint import pformat + from tempfile import mktemp + + if dump is None: + import pickle + dump = pickle.dump + persist = mktemp() + say('EMERGENCY DUMP STATE TO FILE -> {0} <-', persist) + fh = open_file(persist, 'w') + try: + try: + dump(state, fh, protocol=0) + except Exception as exc: + say('Cannot pickle state: {0!r}. Fallback to pformat.', exc) + fh.write(default_encode(pformat(state))) + finally: + fh.flush() + fh.close() + return persist + + +class cached_property(object): + """Property descriptor that caches the return value + of the get function. + + *Examples* + + .. 
code-block:: python + + @cached_property + def connection(self): + return Connection() + + @connection.setter # Prepares stored value + def connection(self, value): + if value is None: + raise TypeError('Connection must be a connection') + return value + + @connection.deleter + def connection(self, value): + # Additional action to do at del(self.attr) + if value is not None: + print('Connection {0!r} deleted'.format(value) + + """ + + def __init__(self, fget=None, fset=None, fdel=None, doc=None): + self.__get = fget + self.__set = fset + self.__del = fdel + self.__doc__ = doc or fget.__doc__ + self.__name__ = fget.__name__ + self.__module__ = fget.__module__ + + def __get__(self, obj, type=None): + if obj is None: + return self + try: + return obj.__dict__[self.__name__] + except KeyError: + value = obj.__dict__[self.__name__] = self.__get(obj) + return value + + def __set__(self, obj, value): + if obj is None: + return self + if self.__set is not None: + value = self.__set(obj, value) + obj.__dict__[self.__name__] = value + + def __delete__(self, obj): + if obj is None: + return self + try: + value = obj.__dict__.pop(self.__name__) + except KeyError: + pass + else: + if self.__del is not None: + self.__del(obj, value) + + def setter(self, fset): + return self.__class__(self.__get, fset, self.__del) + + def deleter(self, fdel): + return self.__class__(self.__get, self.__set, fdel) + + +def reprkwargs(kwargs, sep=', ', fmt='{0}={1}'): + return sep.join(fmt.format(k, _safe_repr(v)) for k, v in items(kwargs)) + + +def reprcall(name, args=(), kwargs={}, sep=', '): + return '{0}({1}{2}{3})'.format( + name, sep.join(map(_safe_repr, args or ())), + (args and kwargs) and sep or '', + reprkwargs(kwargs, sep), + ) + + +@contextmanager +def nested(*managers): # pragma: no cover + # flake8: noqa + """Combine multiple context managers into a single nested + context manager.""" + exits = [] + vars = [] + exc = (None, None, None) + try: + try: + for mgr in managers: + exit = mgr.__exit__ + enter = mgr.__enter__ + vars.append(enter()) + exits.append(exit) + yield vars + except: + exc = sys.exc_info() + finally: + while exits: + exit = exits.pop() + try: + if exit(*exc): + exc = (None, None, None) + except: + exc = sys.exc_info() + if exc != (None, None, None): + # Don't rely on sys.exc_info() still containing + # the right information. Another exception may + # have been raised and caught by an exit method + reraise(exc[0], exc[1], exc[2]) + finally: + del(exc) + + +def shufflecycle(it): + it = list(it) # don't modify callers list + shuffle = random.shuffle + for _ in repeat(None): + shuffle(it) + yield it[0] + + +def entrypoints(namespace): + try: + from pkg_resources import iter_entry_points + except ImportError: + return iter([]) + return ((ep, ep.load()) for ep in iter_entry_points(namespace)) + + +class ChannelPromise(object): + + def __init__(self, contract): + self.__contract__ = contract + + def __call__(self): + try: + return self.__value__ + except AttributeError: + value = self.__value__ = self.__contract__() + return value + + def __repr__(self): + try: + return repr(self.__value__) + except AttributeError: + return ''.format(id(self.__contract__)) + + +def escape_regex(p, white=''): + # what's up with re.escape? 
that code must be neglected or someting + return ''.join(c if c.isalnum() or c in white + else ('\\000' if c == '\000' else '\\' + c) + for c in p) + + +def fileno(f): + if isinstance(f, numbers.Integral): + return f + return f.fileno() + + +def maybe_fileno(f): + """Get object fileno, or :const:`None` if not defined.""" + try: + return fileno(f) + except FILENO_ERRORS: + pass diff --git a/thesisenv/lib/python3.6/site-packages/kombu/utils/amq_manager.py b/thesisenv/lib/python3.6/site-packages/kombu/utils/amq_manager.py new file mode 100644 index 0000000..ce7bb4c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/utils/amq_manager.py @@ -0,0 +1,18 @@ +from __future__ import absolute_import + + +def get_manager(client, hostname=None, port=None, userid=None, + password=None): + import pyrabbit + opt = client.transport_options.get + + def get(name, val, default): + return (val if val is not None + else opt('manager_%s' % name) or + getattr(client, name, None) or default) + + host = get('hostname', hostname, 'localhost') + port = port if port is not None else opt('manager_port', 15672) + userid = get('userid', userid, 'guest') + password = get('password', password, 'guest') + return pyrabbit.Client('%s:%s' % (host, port), userid, password) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/utils/compat.py b/thesisenv/lib/python3.6/site-packages/kombu/utils/compat.py new file mode 100644 index 0000000..d0c3e67 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/utils/compat.py @@ -0,0 +1,60 @@ +""" +kombu.utils.compat +================== + +Helps compatibility with older Python versions. + +""" +from __future__ import absolute_import + + +# ############# timedelta_seconds() -> delta.total_seconds ################### +from datetime import timedelta + +HAVE_TIMEDELTA_TOTAL_SECONDS = hasattr(timedelta, 'total_seconds') + + +if HAVE_TIMEDELTA_TOTAL_SECONDS: # pragma: no cover + + def timedelta_seconds(delta): + """Convert :class:`datetime.timedelta` to seconds. + + Doesn't account for negative values. + + """ + return max(delta.total_seconds(), 0) + +else: # pragma: no cover + + def timedelta_seconds(delta): # noqa + """Convert :class:`datetime.timedelta` to seconds. + + Doesn't account for negative values. + + """ + if delta.days < 0: + return 0 + return delta.days * 86400 + delta.seconds + (delta.microseconds / 10e5) + +# ############# socket.error.errno ########################################### + + +def get_errno(exc): + """:exc:`socket.error` and :exc:`IOError` first got + the ``.errno`` attribute in Py2.7""" + try: + return exc.errno + except AttributeError: + try: + # e.args = (errno, reason) + if isinstance(exc.args, tuple) and len(exc.args) == 2: + return exc.args[0] + except AttributeError: + pass + return 0 + +# ############# collections.OrderedDict ###################################### +try: + from collections import OrderedDict +except ImportError: + from ordereddict import OrderedDict # noqa diff --git a/thesisenv/lib/python3.6/site-packages/kombu/utils/debug.py b/thesisenv/lib/python3.6/site-packages/kombu/utils/debug.py new file mode 100644 index 0000000..8d08115 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/utils/debug.py @@ -0,0 +1,65 @@ +""" +kombu.utils.debug +================= + +Debugging support. 
+ +""" +from __future__ import absolute_import + +import logging + +from functools import wraps + +from kombu.five import items +from kombu.log import get_logger + +__all__ = ['setup_logging', 'Logwrapped'] + + +def setup_logging(loglevel=logging.DEBUG, loggers=['kombu.connection', + 'kombu.channel']): + for logger in loggers: + l = get_logger(logger) + l.addHandler(logging.StreamHandler()) + l.setLevel(loglevel) + + +class Logwrapped(object): + __ignore = ('__enter__', '__exit__') + + def __init__(self, instance, logger=None, ident=None): + self.instance = instance + self.logger = get_logger(logger) + self.ident = ident + + def __getattr__(self, key): + meth = getattr(self.instance, key) + + if not callable(meth) or key in self.__ignore: + return meth + + @wraps(meth) + def __wrapped(*args, **kwargs): + info = '' + if self.ident: + info += self.ident.format(self.instance) + info += '{0.__name__}('.format(meth) + if args: + info += ', '.join(map(repr, args)) + if kwargs: + if args: + info += ', ' + info += ', '.join('{k}={v!r}'.format(k=key, v=value) + for key, value in items(kwargs)) + info += ')' + self.logger.debug(info) + return meth(*args, **kwargs) + + return __wrapped + + def __repr__(self): + return repr(self.instance) + + def __dir__(self): + return dir(self.instance) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/utils/encoding.py b/thesisenv/lib/python3.6/site-packages/kombu/utils/encoding.py new file mode 100644 index 0000000..d054257 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/utils/encoding.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- +""" +kombu.utils.encoding +~~~~~~~~~~~~~~~~~~~~~ + +Utilities to encode text, and to safely emit text from running +applications without crashing with the infamous :exc:`UnicodeDecodeError` +exception. + +""" +from __future__ import absolute_import + +import sys +import traceback + +from kombu.five import text_t + +is_py3k = sys.version_info >= (3, 0) + +#: safe_str takes encoding from this file by default. +#: :func:`set_default_encoding_file` can used to set the +#: default output file. 
+default_encoding_file = None + + +def set_default_encoding_file(file): + global default_encoding_file + default_encoding_file = file + + +def get_default_encoding_file(): + return default_encoding_file + + +if sys.platform.startswith('java'): # pragma: no cover + + def default_encoding(file=None): + return 'utf-8' +else: + + def default_encoding(file=None): # noqa + file = file or get_default_encoding_file() + return getattr(file, 'encoding', None) or sys.getfilesystemencoding() + +if is_py3k: # pragma: no cover + + def str_to_bytes(s): + if isinstance(s, str): + return s.encode() + return s + + def bytes_to_str(s): + if isinstance(s, bytes): + return s.decode() + return s + + def from_utf8(s, *args, **kwargs): + return s + + def ensure_bytes(s): + if not isinstance(s, bytes): + return str_to_bytes(s) + return s + + def default_encode(obj): + return obj + + str_t = str + +else: + + def str_to_bytes(s): # noqa + if isinstance(s, unicode): + return s.encode() + return s + + def bytes_to_str(s): # noqa + return s + + def from_utf8(s, *args, **kwargs): # noqa + return s.encode('utf-8', *args, **kwargs) + + def default_encode(obj, file=None): # noqa + return unicode(obj, default_encoding(file)) + + str_t = unicode + ensure_bytes = str_to_bytes + + +try: + bytes_t = bytes +except NameError: # pragma: no cover + bytes_t = str # noqa + + +def safe_str(s, errors='replace'): + s = bytes_to_str(s) + if not isinstance(s, (text_t, bytes)): + return safe_repr(s, errors) + return _safe_str(s, errors) + + +if is_py3k: + + def _safe_str(s, errors='replace', file=None): + if isinstance(s, str): + return s + try: + return str(s) + except Exception as exc: + return ''.format( + type(s), exc, '\n'.join(traceback.format_stack())) +else: + def _safe_str(s, errors='replace', file=None): # noqa + encoding = default_encoding(file) + try: + if isinstance(s, unicode): + return s.encode(encoding, errors) + return unicode(s, encoding, errors) + except Exception as exc: + return ''.format( + type(s), exc, '\n'.join(traceback.format_stack())) + + +def safe_repr(o, errors='replace'): + try: + return repr(o) + except Exception: + return _safe_str(o, errors) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/utils/eventio.py b/thesisenv/lib/python3.6/site-packages/kombu/utils/eventio.py new file mode 100644 index 0000000..859e464 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/utils/eventio.py @@ -0,0 +1,264 @@ +""" +kombu.utils.eventio +=================== + +Evented IO support for multiple platforms. + +""" +from __future__ import absolute_import + +import errno +import select as __select__ +import socket + +from numbers import Integral + +from kombu.syn import detect_environment + +from . 
import fileno +from .compat import get_errno + +__all__ = ['poll'] + +READ = POLL_READ = 0x001 +WRITE = POLL_WRITE = 0x004 +ERR = POLL_ERR = 0x008 | 0x010 + +_selectf = __select__.select +_selecterr = __select__.error +epoll = getattr(__select__, 'epoll', None) +kqueue = getattr(__select__, 'kqueue', None) +kevent = getattr(__select__, 'kevent', None) +KQ_EV_ADD = getattr(__select__, 'KQ_EV_ADD', 1) +KQ_EV_DELETE = getattr(__select__, 'KQ_EV_DELETE', 2) +KQ_EV_ENABLE = getattr(__select__, 'KQ_EV_ENABLE', 4) +KQ_EV_CLEAR = getattr(__select__, 'KQ_EV_CLEAR', 32) +KQ_EV_ERROR = getattr(__select__, 'KQ_EV_ERROR', 16384) +KQ_EV_EOF = getattr(__select__, 'KQ_EV_EOF', 32768) +KQ_FILTER_READ = getattr(__select__, 'KQ_FILTER_READ', -1) +KQ_FILTER_WRITE = getattr(__select__, 'KQ_FILTER_WRITE', -2) +KQ_FILTER_AIO = getattr(__select__, 'KQ_FILTER_AIO', -3) +KQ_FILTER_VNODE = getattr(__select__, 'KQ_FILTER_VNODE', -4) +KQ_FILTER_PROC = getattr(__select__, 'KQ_FILTER_PROC', -5) +KQ_FILTER_SIGNAL = getattr(__select__, 'KQ_FILTER_SIGNAL', -6) +KQ_FILTER_TIMER = getattr(__select__, 'KQ_FILTER_TIMER', -7) +KQ_NOTE_LOWAT = getattr(__select__, 'KQ_NOTE_LOWAT', 1) +KQ_NOTE_DELETE = getattr(__select__, 'KQ_NOTE_DELETE', 1) +KQ_NOTE_WRITE = getattr(__select__, 'KQ_NOTE_WRITE', 2) +KQ_NOTE_EXTEND = getattr(__select__, 'KQ_NOTE_EXTEND', 4) +KQ_NOTE_ATTRIB = getattr(__select__, 'KQ_NOTE_ATTRIB', 8) +KQ_NOTE_LINK = getattr(__select__, 'KQ_NOTE_LINK', 16) +KQ_NOTE_RENAME = getattr(__select__, 'KQ_NOTE_RENAME', 32) +KQ_NOTE_REVOKE = getattr(__select__, 'kQ_NOTE_REVOKE', 64) + +try: + SELECT_BAD_FD = set((errno.EBADF, errno.WSAENOTSOCK)) +except AttributeError: + SELECT_BAD_FD = set((errno.EBADF,)) + + +class Poller(object): + + def poll(self, timeout): + try: + return self._poll(timeout) + except Exception as exc: + if get_errno(exc) != errno.EINTR: + raise + + +class _epoll(Poller): + + def __init__(self): + self._epoll = epoll() + + def register(self, fd, events): + try: + self._epoll.register(fd, events) + except Exception as exc: + if get_errno(exc) != errno.EEXIST: + raise + + def unregister(self, fd): + try: + self._epoll.unregister(fd) + except (socket.error, ValueError, KeyError, TypeError): + pass + except (IOError, OSError) as exc: + if get_errno(exc) not in (errno.ENOENT, errno.EPERM): + raise + + def _poll(self, timeout): + return self._epoll.poll(timeout if timeout is not None else -1) + + def close(self): + self._epoll.close() + + +class _kqueue(Poller): + w_fflags = (KQ_NOTE_WRITE | KQ_NOTE_EXTEND | + KQ_NOTE_ATTRIB | KQ_NOTE_DELETE) + + def __init__(self): + self._kqueue = kqueue() + self._active = {} + self.on_file_change = None + self._kcontrol = self._kqueue.control + + def register(self, fd, events): + self._control(fd, events, KQ_EV_ADD) + self._active[fd] = events + + def unregister(self, fd): + events = self._active.pop(fd, None) + if events: + try: + self._control(fd, events, KQ_EV_DELETE) + except socket.error: + pass + + def watch_file(self, fd): + ev = kevent(fd, + filter=KQ_FILTER_VNODE, + flags=KQ_EV_ADD | KQ_EV_ENABLE | KQ_EV_CLEAR, + fflags=self.w_fflags) + self._kcontrol([ev], 0) + + def unwatch_file(self, fd): + ev = kevent(fd, + filter=KQ_FILTER_VNODE, + flags=KQ_EV_DELETE, + fflags=self.w_fflags) + self._kcontrol([ev], 0) + + def _control(self, fd, events, flags): + if not events: + return + kevents = [] + if events & WRITE: + kevents.append(kevent(fd, + filter=KQ_FILTER_WRITE, + flags=flags)) + if not kevents or events & READ: + kevents.append( + kevent(fd, filter=KQ_FILTER_READ, 
flags=flags), + ) + control = self._kcontrol + for e in kevents: + try: + control([e], 0) + except ValueError: + pass + + def _poll(self, timeout): + kevents = self._kcontrol(None, 1000, timeout) + events, file_changes = {}, [] + for k in kevents: + fd = k.ident + if k.filter == KQ_FILTER_READ: + events[fd] = events.get(fd, 0) | READ + elif k.filter == KQ_FILTER_WRITE: + if k.flags & KQ_EV_EOF: + events[fd] = ERR + else: + events[fd] = events.get(fd, 0) | WRITE + elif k.filter == KQ_EV_ERROR: + events[fd] = events.get(fd, 0) | ERR + elif k.filter == KQ_FILTER_VNODE: + if k.fflags & KQ_NOTE_DELETE: + self.unregister(fd) + file_changes.append(k) + if file_changes: + self.on_file_change(file_changes) + return list(events.items()) + + def close(self): + self._kqueue.close() + + +class _select(Poller): + + def __init__(self): + self._all = (self._rfd, + self._wfd, + self._efd) = set(), set(), set() + + def register(self, fd, events): + fd = fileno(fd) + if events & ERR: + self._efd.add(fd) + if events & WRITE: + self._wfd.add(fd) + if events & READ: + self._rfd.add(fd) + + def _remove_bad(self): + for fd in self._rfd | self._wfd | self._efd: + try: + _selectf([fd], [], [], 0) + except (_selecterr, socket.error) as exc: + if get_errno(exc) in SELECT_BAD_FD: + self.unregister(fd) + + def unregister(self, fd): + try: + fd = fileno(fd) + except socket.error as exc: + # we don't know the previous fd of this object + # but it will be removed by the next poll iteration. + if get_errno(exc) in SELECT_BAD_FD: + return + raise + self._rfd.discard(fd) + self._wfd.discard(fd) + self._efd.discard(fd) + + def _poll(self, timeout): + try: + read, write, error = _selectf( + self._rfd, self._wfd, self._efd, timeout, + ) + except (_selecterr, socket.error) as exc: + if get_errno(exc) == errno.EINTR: + return + elif get_errno(exc) in SELECT_BAD_FD: + return self._remove_bad() + raise + + events = {} + for fd in read: + if not isinstance(fd, Integral): + fd = fd.fileno() + events[fd] = events.get(fd, 0) | READ + for fd in write: + if not isinstance(fd, Integral): + fd = fd.fileno() + events[fd] = events.get(fd, 0) | WRITE + for fd in error: + if not isinstance(fd, Integral): + fd = fd.fileno() + events[fd] = events.get(fd, 0) | ERR + return list(events.items()) + + def close(self): + self._rfd.clear() + self._wfd.clear() + self._efd.clear() + + +def _get_poller(): + if detect_environment() != 'default': + # greenlet + return _select + elif epoll: + # Py2.6+ Linux + return _epoll + elif kqueue: + # Py2.6+ on BSD / Darwin + return _select # was: _kqueue + else: + return _select + + +def poll(*args, **kwargs): + return _get_poller()(*args, **kwargs) diff --git a/thesisenv/lib/python3.6/site-packages/kombu/utils/functional.py b/thesisenv/lib/python3.6/site-packages/kombu/utils/functional.py new file mode 100644 index 0000000..746f42f --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/utils/functional.py @@ -0,0 +1,82 @@ +from __future__ import absolute_import + +import sys + +from collections import Iterable, Mapping + +from kombu.five import string_t + +__all__ = ['lazy', 'maybe_evaluate', 'is_list', 'maybe_list'] + + +class lazy(object): + """Holds lazy evaluation. + + Evaluated when called or if the :meth:`evaluate` method is called. + The function is re-evaluated on every call. + + Overloaded operations that will evaluate the promise: + :meth:`__str__`, :meth:`__repr__`, :meth:`__cmp__`. 
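lazy and maybe_evaluate above are small promise helpers: lazy wraps a call without running it, and maybe_evaluate forces lazy values while passing everything else through unchanged. A trivial sketch of how they behave:

    from kombu.utils.functional import lazy, maybe_evaluate

    # Sketch only: the wrapped call runs on each evaluation.
    defer = lazy(sum, [1, 2, 3])
    assert maybe_evaluate(defer) == 6   # forces the promise
    assert maybe_evaluate(6) == 6       # non-lazy values pass through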
+ + """ + + def __init__(self, fun, *args, **kwargs): + self._fun = fun + self._args = args + self._kwargs = kwargs + + def __call__(self): + return self.evaluate() + + def evaluate(self): + return self._fun(*self._args, **self._kwargs) + + def __str__(self): + return str(self()) + + def __repr__(self): + return repr(self()) + + def __eq__(self, rhs): + return self() == rhs + + def __ne__(self, rhs): + return self() != rhs + + def __deepcopy__(self, memo): + memo[id(self)] = self + return self + + def __reduce__(self): + return (self.__class__, (self._fun, ), {'_args': self._args, + '_kwargs': self._kwargs}) + + if sys.version_info[0] < 3: + + def __cmp__(self, rhs): + if isinstance(rhs, self.__class__): + return -cmp(rhs, self()) + return cmp(self(), rhs) + + +def maybe_evaluate(value): + """Evaluates if the value is a :class:`lazy` instance.""" + if isinstance(value, lazy): + return value.evaluate() + return value + + +def is_list(l, scalars=(Mapping, string_t), iters=(Iterable, )): + """Return true if the object is iterable (but not + if object is a mapping or string).""" + return isinstance(l, iters) and not isinstance(l, scalars or ()) + + +def maybe_list(l, scalars=(Mapping, string_t)): + """Return list of one element if ``l`` is a scalar.""" + return l if l is None or is_list(l, scalars) else [l] + + +# Compat names (before kombu 3.0) +promise = lazy +maybe_promise = maybe_evaluate diff --git a/thesisenv/lib/python3.6/site-packages/kombu/utils/limits.py b/thesisenv/lib/python3.6/site-packages/kombu/utils/limits.py new file mode 100644 index 0000000..833cb96 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/utils/limits.py @@ -0,0 +1,69 @@ +""" +kombu.utils.limits +================== + +Token bucket implementation for rate limiting. + +""" +from __future__ import absolute_import + +from kombu.five import monotonic + +__all__ = ['TokenBucket'] + + +class TokenBucket(object): + """Token Bucket Algorithm. + + See http://en.wikipedia.org/wiki/Token_Bucket + Most of this code was stolen from an entry in the ASPN Python Cookbook: + http://code.activestate.com/recipes/511490/ + + .. admonition:: Thread safety + + This implementation is not thread safe. Access to a `TokenBucket` + instance should occur within the critical section of any multithreaded + code. + + """ + + #: The rate in tokens/second that the bucket will be refilled. + fill_rate = None + + #: Maximum number of tokens in the bucket. + capacity = 1 + + #: Timestamp of the last time a token was taken out of the bucket. + timestamp = None + + def __init__(self, fill_rate, capacity=1): + self.capacity = float(capacity) + self._tokens = capacity + self.fill_rate = float(fill_rate) + self.timestamp = monotonic() + + def can_consume(self, tokens=1): + """Return :const:`True` if the number of tokens can be consumed + from the bucket. If they can be consumed, a call will also consume the + requested number of tokens from the bucket. Calls will only consume + `tokens` (the number requested) or zero tokens -- it will never consume + a partial number of tokens.""" + if tokens <= self._get_tokens(): + self._tokens -= tokens + return True + return False + + def expected_time(self, tokens=1): + """Return the time (in seconds) when a new token is expected + to be available. 
This will not consume any tokens from the bucket.""" + _tokens = self._get_tokens() + tokens = max(tokens, _tokens) + return (tokens - _tokens) / self.fill_rate + + def _get_tokens(self): + if self._tokens < self.capacity: + now = monotonic() + delta = self.fill_rate * (now - self.timestamp) + self._tokens = min(self.capacity, self._tokens + delta) + self.timestamp = now + return self._tokens diff --git a/thesisenv/lib/python3.6/site-packages/kombu/utils/text.py b/thesisenv/lib/python3.6/site-packages/kombu/utils/text.py new file mode 100644 index 0000000..066b28a --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/utils/text.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +from difflib import SequenceMatcher + +from kombu import version_info_t +from kombu.five import string_t + + +def fmatch_iter(needle, haystack, min_ratio=0.6): + for key in haystack: + ratio = SequenceMatcher(None, needle, key).ratio() + if ratio >= min_ratio: + yield ratio, key + + +def fmatch_best(needle, haystack, min_ratio=0.6): + try: + return sorted( + fmatch_iter(needle, haystack, min_ratio), reverse=True, + )[0][1] + except IndexError: + pass + + +def version_string_as_tuple(s): + v = _unpack_version(*s.split('.')) + # X.Y.3a1 -> (X, Y, 3, 'a1') + if isinstance(v.micro, string_t): + v = version_info_t(v.major, v.minor, *_splitmicro(*v[2:])) + # X.Y.3a1-40 -> (X, Y, 3, 'a1', '40') + if not v.serial and v.releaselevel and '-' in v.releaselevel: + v = version_info_t(*list(v[0:3]) + v.releaselevel.split('-')) + return v + + +def _unpack_version(major, minor=0, micro=0, releaselevel='', serial=''): + return version_info_t(int(major), int(minor), micro, releaselevel, serial) + + +def _splitmicro(micro, releaselevel='', serial=''): + for index, char in enumerate(micro): + if not char.isdigit(): + break + else: + return int(micro or 0), releaselevel, serial + return int(micro[:index]), micro[index:], serial diff --git a/thesisenv/lib/python3.6/site-packages/kombu/utils/url.py b/thesisenv/lib/python3.6/site-packages/kombu/utils/url.py new file mode 100644 index 0000000..f93282d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/kombu/utils/url.py @@ -0,0 +1,64 @@ +from __future__ import absolute_import + +from functools import partial + +try: + from urllib.parse import parse_qsl, quote, unquote, urlparse +except ImportError: + from urllib import quote, unquote # noqa + from urlparse import urlparse, parse_qsl # noqa + +from . 
import kwdict +from kombu.five import string_t + +safequote = partial(quote, safe='') + + +def _parse_url(url): + scheme = urlparse(url).scheme + schemeless = url[len(scheme) + 3:] + # parse with HTTP URL semantics + parts = urlparse('http://' + schemeless) + path = parts.path or '' + path = path[1:] if path and path[0] == '/' else path + return (scheme, unquote(parts.hostname or '') or None, parts.port, + unquote(parts.username or '') or None, + unquote(parts.password or '') or None, + unquote(path or '') or None, + kwdict(dict(parse_qsl(parts.query)))) + + +def parse_url(url): + scheme, host, port, user, password, path, query = _parse_url(url) + return dict(transport=scheme, hostname=host, + port=port, userid=user, + password=password, virtual_host=path, **query) + + +def as_url(scheme, host=None, port=None, user=None, password=None, + path=None, query=None, sanitize=False, mask='**'): + parts = ['{0}://'.format(scheme)] + if user or password: + if user: + parts.append(safequote(user)) + if password: + if sanitize: + parts.extend([':', mask] if mask else [':']) + else: + parts.extend([':', safequote(password)]) + parts.append('@') + parts.append(safequote(host) if host else '') + if port: + parts.extend([':', port]) + parts.extend(['/', path]) + return ''.join(str(part) for part in parts if part) + + +def sanitize_url(url, mask='**'): + return as_url(*_parse_url(url), sanitize=True, mask=mask) + + +def maybe_sanitize_url(url, mask='**'): + if isinstance(url, string_t) and '://' in url: + return sanitize_url(url, mask) + return url diff --git a/thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/INSTALLER b/thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/thesisenv/lib/python3.6/site-packages/pip-18.0.dist-info/LICENSE.txt b/thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/LICENSE.txt similarity index 100% rename from thesisenv/lib/python3.6/site-packages/pip-18.0.dist-info/LICENSE.txt rename to thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/LICENSE.txt diff --git a/thesisenv/lib/python3.6/site-packages/pip-18.0.dist-info/METADATA b/thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/METADATA similarity index 97% rename from thesisenv/lib/python3.6/site-packages/pip-18.0.dist-info/METADATA rename to thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/METADATA index e1ebba8..2e314aa 100644 --- a/thesisenv/lib/python3.6/site-packages/pip-18.0.dist-info/METADATA +++ b/thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/METADATA @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: pip -Version: 18.0 +Version: 18.1 Summary: The PyPA recommended tool for installing Python packages. 
Home-page: https://pip.pypa.io/ Author: The pip developers @@ -19,6 +19,7 @@ Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.4 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: Implementation :: PyPy Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.* diff --git a/thesisenv/lib/python3.6/site-packages/pip-18.0.dist-info/RECORD b/thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/RECORD similarity index 81% rename from thesisenv/lib/python3.6/site-packages/pip-18.0.dist-info/RECORD rename to thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/RECORD index 6556cc1..476de55 100644 --- a/thesisenv/lib/python3.6/site-packages/pip-18.0.dist-info/RECORD +++ b/thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/RECORD @@ -1,72 +1,81 @@ -pip/__init__.py,sha256=IKM5mKN1HOw4yPACjY238eZg9mv0kYUOquDTItUjtP4,21 +pip/__init__.py,sha256=nO-iphoXiDoci_ZAMl-PG2zdd4Y7m88jBDILTYzwGy4,21 pip/__main__.py,sha256=L3IHqBeasELUHvwy5CT_izVEMhM12tve289qut49DvU,623 -pip/_internal/__init__.py,sha256=wzTPPJ7Lieb-khuxuRLbXXOtx1greP5hZtJTnwUFPqk,11250 -pip/_internal/basecommand.py,sha256=m_PKP9SP7nRi3tWo1oeTU6sIf5gq1bRKv5SS-MdCJPM,9780 -pip/_internal/baseparser.py,sha256=vzVUbYmVqJlyfiJ_3Phpi33lcdZrRWQIRmYlzt-071g,8524 -pip/_internal/build_env.py,sha256=cI5lEav68fCpfiy-txgO4lc2b1aUtUSer4EzPJXPOYY,4209 -pip/_internal/cache.py,sha256=hurE4ppJjTd4j4PVwjg5zATjajNZLiiL2g1DzzFnE8U,6821 -pip/_internal/cmdoptions.py,sha256=goksRkLvAuxbfocy1pjotEFj3np0kd9g8BgvlvQpVzk,16319 -pip/_internal/compat.py,sha256=oxa3m7C2qQ4cNDi14QPKHV612YGpVoN5Q4cxe04phjc,7698 -pip/_internal/configuration.py,sha256=U0Yd9p131xJh_9DEl75Xlb82rwFzNIaGJqsJP7o2qGY,12982 +pip/_internal/__init__.py,sha256=b0jSFCCViGhB1RWni35_NMkH3Y-mbZrV648DGMagDjs,2869 +pip/_internal/build_env.py,sha256=zKhqmDMnrX5OTSNQ4xBw-mN5mTGVu6wjiNFW-ajWYEI,4797 +pip/_internal/cache.py,sha256=96_aKtDbwgLEVNgNabOT8GrFCYZEACedoiucqU5ccg8,6829 +pip/_internal/configuration.py,sha256=KMgG3ufFrUKX_QESi2cMVvFi47tl845Bg1ZkNthlWik,13243 pip/_internal/download.py,sha256=c5Hkimq39eJdZ6DN0_0etjK43-0a5CK_W_3sVLqH87g,33300 -pip/_internal/exceptions.py,sha256=sgmH3aFK-mR9mGRlsVAJ8HfhRFf8J_zUbLg5vuZcmxM,8221 -pip/_internal/index.py,sha256=jgcLUbIWiwOyS9NPqLNuhau8QLtre1a6EQ-2f9p9lM8,41067 -pip/_internal/locations.py,sha256=rkI3KWmFhMXhPTtoglXf7uZKfJ3em1GF9n7e8mOiiPo,6301 -pip/_internal/pep425tags.py,sha256=luGuXXWlZHEn8rGb0V_jSwWVuO-mcMzOVpezA72YkFQ,10798 -pip/_internal/resolve.py,sha256=YZ6W49hQV3ybjvcj-IdBN-XVaV_SdW7jIePuVWMyKzE,13582 -pip/_internal/status_codes.py,sha256=F6uDG6Gj7RNKQJUDnd87QKqI16Us-t-B0wPF_4QMpWc,156 -pip/_internal/wheel.py,sha256=3IlAsd9COXe2vUWccdv6ia1eCB-rE5vki3bhk18dMao,31844 -pip/_internal/commands/__init__.py,sha256=ICge5zPnW7CdvsioRLqPXpiaGtsN0FR0p5mIZ-moGtU,2246 -pip/_internal/commands/check.py,sha256=XDaA8RbTfhRr0IOmqFrQN6hhnJdXfLgHLBg7zMFNma8,1393 -pip/_internal/commands/completion.py,sha256=WPxXNj2bCfDtqP5Oe04AfzeFbfOHDnmLI41wz4GsruI,2924 -pip/_internal/commands/configuration.py,sha256=kzwz6y3tfreGjIcrXMAa0MvZMvPuIKkbvwzwJ4J-wk4,7116 -pip/_internal/commands/download.py,sha256=oNdLQccS-jxfSS-mrJbh47-32kdPIHMmvzIgucSsvao,9058 -pip/_internal/commands/freeze.py,sha256=87qc7ggLKhEC73x1Vul1Crf9-c3wSMSxS0fZOpJOxb0,3224 
-pip/_internal/commands/hash.py,sha256=bskoCvz28Ayp7y2ZmJFjcACxisIjTPjzSfexfT24Ph4,1672 -pip/_internal/commands/help.py,sha256=6a_crHnyoNX0L63W-XMCXG6dseiH4-qPZDVWvf5jXKo,1043 -pip/_internal/commands/install.py,sha256=aTH1-QI_ywbQT0xwVVbdZOPba2CfzNBDb1aP1P6c0ZE,20294 -pip/_internal/commands/list.py,sha256=hrDoyY6hPIo94DrEKyVZl69uKH8uGbnSVOJroh-8yhk,10250 -pip/_internal/commands/search.py,sha256=47OjdUeQDMNUTI-y3hWnCE4iCHbWWAnjKigOY74QgwU,4713 -pip/_internal/commands/show.py,sha256=2aguOk_tp4bqeVUBhqdqtso5pJOBS1YmO_5H8qyO5SM,6280 -pip/_internal/commands/uninstall.py,sha256=rWNszUL0_r1XtZCzemNxDiX5U_PLf9CM1FYfrAU_HJI,2920 -pip/_internal/commands/wheel.py,sha256=bwV-ag37ZAodaqs58WnmQ2sruiFTVwR4ilFJFS8DMC8,7011 +pip/_internal/exceptions.py,sha256=EIGotnq6qM2nbGtnlgZ8Xp5VfP2W4-9UOCzQGMwy5MY,8899 +pip/_internal/index.py,sha256=6CAtZ8QTLcpw0fJqQ9OPu-Os1ettLZtVY1pPSKia8r8,34789 +pip/_internal/locations.py,sha256=ujNrLnA04Y_EmSriO0nS6qkkw_BkPfobB_hdwIDPvpM,6307 +pip/_internal/pep425tags.py,sha256=TQhxOPss4RjxgyVgxpSRe31HaTcWmn-LVjWBbkvkjzk,10845 +pip/_internal/pyproject.py,sha256=fpO52MCa3w5xSlXIBXw39BDTGzP8G4570EW34hVvIKQ,5481 +pip/_internal/resolve.py,sha256=tdepxCewsXXNFKSIYGSxiLvzi1xCv7UVFT9jRCDO90A,13578 +pip/_internal/wheel.py,sha256=fg9E936DaI1LyrBPHqtzHG_WEVyuUwipHISkD6N3jNw,32007 +pip/_internal/cli/__init__.py,sha256=FkHBgpxxb-_gd6r1FjnNhfMOzAUYyXoXKJ6abijfcFU,132 +pip/_internal/cli/autocompletion.py,sha256=ptvsMdGjq42pzoY4skABVF43u2xAtLJlXAulPi-A10Y,6083 +pip/_internal/cli/base_command.py,sha256=ke6af4iWzrZoc3HtiPKnCZJvD6GlX8dRwBwpFCg1axc,9963 +pip/_internal/cli/cmdoptions.py,sha256=WoPPY1uHsDjA_NvZek8Mko38rxraD3pX8eZUkNKvk10,19468 +pip/_internal/cli/main_parser.py,sha256=Ga_kT7if-Gg0rmmRqlGEHW6JWVm9zwzO7igJm6RE9EI,2763 +pip/_internal/cli/parser.py,sha256=VZKUKJPbU6I2cHPLDOikin-aCx7OvLcZ3fzYp3xytd8,9378 +pip/_internal/cli/status_codes.py,sha256=F6uDG6Gj7RNKQJUDnd87QKqI16Us-t-B0wPF_4QMpWc,156 +pip/_internal/commands/__init__.py,sha256=CQAzhVx9ViPtqLNUvAeqnKj5iWfFEcqMx5RlZWjJ30c,2251 +pip/_internal/commands/check.py,sha256=CyeYH2kfDKSGSURoBfWtx-sTcZZQP-bK170NmKYlmsg,1398 +pip/_internal/commands/completion.py,sha256=hqvCvoxsIHjysiD7olHKTqK2lzE1_lS6LWn69kN5qyI,2929 +pip/_internal/commands/configuration.py,sha256=265HWuUxPggCNcIeWHA3p-LDDiRVnexwFgwmHGgWOHY,7125 +pip/_internal/commands/download.py,sha256=D_iGMp3xX2iD7KZYZAjXlYT3rf3xjwxyYe05KE-DVzE,6514 +pip/_internal/commands/freeze.py,sha256=VvS3G0wrm_9BH3B7Ex5msLL_1UQTtCq5G8dDI63Iemo,3259 +pip/_internal/commands/hash.py,sha256=K1JycsD-rpjqrRcL_ijacY9UKmI82pQcLYq4kCM4Pv0,1681 +pip/_internal/commands/help.py,sha256=MwBhPJpW1Dt3GfJV3V8V6kgAy_pXT0jGrZJB1wCTW-E,1090 +pip/_internal/commands/install.py,sha256=tKyzfo5bhDGLVTTQCQJ9PFnDjimQvEWnwIAI2XHpaac,21039 +pip/_internal/commands/list.py,sha256=n740MsR0cG34EuvGWMzdVl0uIA3UIYx1_95FUsTktN0,10272 +pip/_internal/commands/search.py,sha256=sLZ9icKMEEGekHvzRRZMiTd1zCFIZeDptyyU1mQCYzk,4728 +pip/_internal/commands/show.py,sha256=9EVh86vY0NZdlhT-wsuV-zq_MAV6qqV4S1Akn3wkUuw,6289 +pip/_internal/commands/uninstall.py,sha256=h0gfPF5jylDESx_IHgF6bZME7QAEOHzQHdn65GP-jrE,2963 +pip/_internal/commands/wheel.py,sha256=ZuVf_DMpKCUzBVstolvQPAeajQRC51Oky5_hDHzhhFs,7020 pip/_internal/models/__init__.py,sha256=3DHUd_qxpPozfzouoqa9g9ts1Czr5qaHfFxbnxriepM,63 -pip/_internal/models/index.py,sha256=40lauIZxYWPay3T8igBQwjE8SlDAsebphQfRA2WjfJI,418 +pip/_internal/models/candidate.py,sha256=zq2Vb5l5JflrVX7smHTJHQciZWHyoJZuYTLeQa1G16c,741 
+pip/_internal/models/format_control.py,sha256=aDbH4D2XuyaGjtRjTLQhNzClAcLZdJCKSHO8xbZSmFA,2202 +pip/_internal/models/index.py,sha256=YI1WlhWfS9mVPY0bIboA5la2pjJ2J0qgPJIbvdEjZBk,996 +pip/_internal/models/link.py,sha256=E61PvS2Wrmb9-zT-eAc_8_xI3C-89wJlpL8SL-mlQmg,3998 pip/_internal/operations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 pip/_internal/operations/check.py,sha256=ahcOg5p68nNow6_wy5prYYK0KZq22lm0CsJn8AyDMCI,4937 -pip/_internal/operations/freeze.py,sha256=ww6GQnFB2Gs6IV8ObPA53EUy1JvOjgd_XEa6AVTvKMc,10066 -pip/_internal/operations/prepare.py,sha256=Sp8_aGb26jCaPLEt1Ymf_QUstKSW0SQMWopOlbltvS0,14334 +pip/_internal/operations/freeze.py,sha256=lskaBcqf3bPZupG032fuLf76QYv5wpAQ6jsiXac56Bg,10450 +pip/_internal/operations/prepare.py,sha256=atoLFj3OD5KfXsa5dYBMC_mI06l068F5yZhF4jle1JA,14280 pip/_internal/req/__init__.py,sha256=JnNZWvKUQuqAwHh64LCD3zprzWIVQEXChTo2UGHzVqo,2093 -pip/_internal/req/req_file.py,sha256=0AwcYfriooNxlQJjYPOKJywCCSk_C8PcC732LFHN7QQ,11910 -pip/_internal/req/req_install.py,sha256=mzefObnBkthKgc_5SQvmsgZQGWGP1m0gBqUVco4Gab0,43743 -pip/_internal/req/req_set.py,sha256=5nZEZ4C3HUzR7zegLB7DJuUspiTxxi8LM4wj-PH-Yn8,7007 +pip/_internal/req/constructors.py,sha256=97WQp9Svh-Jw3oLZL9_57gJ3zihm5LnWlSRjOwOorDU,9573 +pip/_internal/req/req_file.py,sha256=ORA0GKUjGd6vy7pmBwXR55FFj4h_OxYykFQ6gHuWvt0,11940 +pip/_internal/req/req_install.py,sha256=ry1RtNNCefDHAnf3EeGMpea-9pC6Yk1uHzP0Q5p2Un0,34046 +pip/_internal/req/req_set.py,sha256=nE6oagXJSiQREuuebX3oJO5OHSOVUIlvLLilodetBzc,7264 pip/_internal/req/req_tracker.py,sha256=zH28YHV5TXAVh1ZOEZi6Z1Edkiu26dN2tXfR6VbQ3B4,2370 -pip/_internal/req/req_uninstall.py,sha256=IDBXvY563qXDlObo3uI2MLhNfTYA57JBwdt4q5rRx9Y,16636 +pip/_internal/req/req_uninstall.py,sha256=ORSPah64KOVrKo-InMM3zgS5HQqbl5TLHFnE_Lxstq8,16737 pip/_internal/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -pip/_internal/utils/appdirs.py,sha256=TC14M6zMndoMHrC_ZZB-1b85AtX5RVt-kTAs_vX5mg4,9114 +pip/_internal/utils/appdirs.py,sha256=SPfibHtvOKzD_sHrpEZ60HfLae3GharU4Tg7SB3c-XM,9120 +pip/_internal/utils/compat.py,sha256=LSAvzXcsGY2O2drKIPszR5Ja2G0kup__51l3bx1jR_Q,8015 pip/_internal/utils/deprecation.py,sha256=yQTe6dyWlBfxSBrOv_MdRXF1RPLER_EWOp-pa2zLoZc,3021 pip/_internal/utils/encoding.py,sha256=D8tmfStCah6xh9OLhH9mWLr77q4akhg580YHJMKpq3Y,1025 -pip/_internal/utils/filesystem.py,sha256=buZGDuxZDbWtaW7CWFP9imqx_7EDsqmL7kZdJ5wFPfw,909 +pip/_internal/utils/filesystem.py,sha256=ZOIHbacJ-SJtuZru4GoA5DuSIYyeaE4G5kfZPf5cn1A,915 pip/_internal/utils/glibc.py,sha256=prOrsBjmgkDE-hY4Pl120yF5MIlkkmGrFLs8XfIyT-w,3004 pip/_internal/utils/hashes.py,sha256=rJk-gj6F-sHggXAG97dhynqUHFFgApyZLWgaG2xCHME,2900 -pip/_internal/utils/logging.py,sha256=McWNwXiFT5iXBBcdEZ1f3ntDWNLnAuhCUDL2fLcPIOU,6289 -pip/_internal/utils/misc.py,sha256=G56WFsA2bEl8bX-56hAdo24R1t0KrC4SxoWVnz9GIBg,28604 -pip/_internal/utils/outdated.py,sha256=MQ8vzB7b5SD4Yfe7uskhkvREbShbssicJ97mHXiCF4k,5276 -pip/_internal/utils/packaging.py,sha256=v5aEmCFeq5cJEKU7PwKzAhReDgA4aGeUONSuvkl-Iy0,2277 +pip/_internal/utils/logging.py,sha256=BQeUDEER3zlK0O4yv6DBfz6TK3f9XoLXyDlnB0mZVf0,6295 +pip/_internal/utils/misc.py,sha256=YscDfBiFx1spYOtSgdI_5hnc5BZUysWAyz1aVL5y-48,29904 +pip/_internal/utils/models.py,sha256=DQYZSRhjvSdDTAaJLLCpDtxAn1S_-v_8nlNjv4T2jwY,1042 +pip/_internal/utils/outdated.py,sha256=BXtCMKR6gjTrvMfP3MWzZ1Y4ZU4qqoCfbRNqQCusVt8,5642 +pip/_internal/utils/packaging.py,sha256=Ru8ls_S8PPKR8RKEn7jMetENY_A9jPet1HlhTZwpFxU,2443 
pip/_internal/utils/setuptools_build.py,sha256=0blfscmNJW_iZ5DcswJeDB_PbtTEjfK9RL1R1WEDW2E,278 pip/_internal/utils/temp_dir.py,sha256=n2FkVlwRX_hS61fYt3nSAh2e2V6CcZn_dfbPId1pAQE,2615 pip/_internal/utils/typing.py,sha256=ztYtZAcqjCYDwP-WlF6EiAAskAsZBMMXtuqvfgZIlgQ,1139 -pip/_internal/utils/ui.py,sha256=JYw7ny1ytKQJdawBCwE54hS6n1eGGw9hjimBwnFQ52Y,13651 -pip/_internal/vcs/__init__.py,sha256=tFMlJNMnLcaDZB_nkbLBYbk0UbRrKihnKspzfosOHOA,15980 -pip/_internal/vcs/bazaar.py,sha256=UIcvDihmoALsdDjo2gyUtxQpEXimiH5Drrv7KkeoeXY,3582 -pip/_internal/vcs/git.py,sha256=BLbXbsv8punlVEQPz5Rqj9od4-w6TjXZSFgeJfSCFjQ,11275 -pip/_internal/vcs/mercurial.py,sha256=karAzCCaUbtGGYs4mR88yscWTs84BKYIQLejkB34BRs,3438 -pip/_internal/vcs/subversion.py,sha256=RgOv6cHC9nfILa3jjVj4MWaqVjpg8K93T1_2HoB6MRo,8863 +pip/_internal/utils/ui.py,sha256=FW8wdtc7DvNwJClGr_TvGZlqcoO482GYe0UY9nKmpso,13657 +pip/_internal/vcs/__init__.py,sha256=2Ct9ogOwzS6ZKKaEXKN2XDiBOiFHMcejnN1KM21mLrQ,16319 +pip/_internal/vcs/bazaar.py,sha256=rjskVmSSn68O7lC5JrGmDTWXneXFMMJJvj_bbdSM8QA,3669 +pip/_internal/vcs/git.py,sha256=n1cFBqTnLIcxAOClZMgOBqELjEjygDBPZ9z-Q7g0qVQ,12580 +pip/_internal/vcs/mercurial.py,sha256=jVTa0XQpFR6EiBcaqW4E4JjTce_t1tFnKRaIhaIPlS8,3471 +pip/_internal/vcs/subversion.py,sha256=vDLTfcjj0kgqcEsbPBfveC4CRxyhWiOjke-qN0Zr8CE,7676 pip/_vendor/__init__.py,sha256=XnhkujjE1qUGRlYGYbIRrEGYYYBcNLBraE27HH48wYw,4756 pip/_vendor/appdirs.py,sha256=BENKsvcA08IpccD9345-rMrg3aXWFA1q6BFEglnHg6I,24547 pip/_vendor/distro.py,sha256=dOMrjIXv-3GmEbtP-NJc057Sv19P7ZAdke-v0TBeNio,42455 pip/_vendor/ipaddress.py,sha256=2OgbkeAD2rLkcXqbcvof3J5R7lRwjNLoBySyTkBtKnc,79852 -pip/_vendor/pyparsing.py,sha256=YmrfFWmUgne4-FjtSotxVOJvtxea3TOtsrOtrhozkXs,225348 +pip/_vendor/pyparsing.py,sha256=My2ZwDJCEaZkZgZyG9gL--48RLGmf9vnVCTW93rhdYI,226342 pip/_vendor/retrying.py,sha256=k3fflf5_Mm0XcIJYhB7Tj34bqCCPhUDkYbx1NvW2FPE,9972 pip/_vendor/six.py,sha256=A08MPb-Gi9FfInI3IW7HimXFmEH2T2IPzHgDvdhZPRA,30888 pip/_vendor/cachecontrol/__init__.py,sha256=6cRPchVqkAkeUtYTSW8qCetjSqJo-GxP-n4VMVDbvmc,302 @@ -82,9 +91,9 @@ pip/_vendor/cachecontrol/wrapper.py,sha256=sfr9YHWx-5TwNz1H5rT6QOo8ggII6v3vbEDjQ pip/_vendor/cachecontrol/caches/__init__.py,sha256=-gHNKYvaeD0kOk5M74eOrsSgIKUtC6i6GfbmugGweEo,86 pip/_vendor/cachecontrol/caches/file_cache.py,sha256=8vrSzzGcdfEfICago1uSFbkumNJMGLbCdEkXsmUIExw,4177 pip/_vendor/cachecontrol/caches/redis_cache.py,sha256=HxelMpNCo-dYr2fiJDwM3hhhRmxUYtB5tXm1GpAAT4Y,856 -pip/_vendor/certifi/__init__.py,sha256=KHDlQtQQTRmOG0TJi12ZIE5WWq2tYHM5ax30EX6UJ04,63 -pip/_vendor/certifi/__main__.py,sha256=FiOYt1Fltst7wk9DRa6GCoBr8qBUxlNQu_MKJf04E6s,41 -pip/_vendor/certifi/cacert.pem,sha256=0lwMLbfi4umzDdOmdLMdrNkgZxw-5y6PCE10PrnJy-k,268839 +pip/_vendor/certifi/__init__.py,sha256=5lCYV1iWxoirX1OAaSHkBYUuZGdcwEjEBS6DS_trL0s,63 +pip/_vendor/certifi/__main__.py,sha256=NaCn6WtWME-zzVWQ2j4zFyl8cY4knDa9CwtHNIeFPhM,53 +pip/_vendor/certifi/cacert.pem,sha256=XA-4HVBsOrBD5lfg-b3PiUzAvwUd2qlIzwXypIMIRGM,263074 pip/_vendor/certifi/core.py,sha256=xPQDdG_siy5A7BfqGWa7RJhcA61xXEqPiSrw9GNyhHE,836 pip/_vendor/chardet/__init__.py,sha256=YsP5wQlsHJ2auF1RZJfypiSrCA7_bQiRm3ES_NI76-Y,1559 pip/_vendor/chardet/big5freq.py,sha256=D_zK5GyzoVsRes0HkLJziltFQX0bKCLOrFe9_xDvO_8,31254 @@ -208,17 +217,24 @@ pip/_vendor/msgpack/__init__.py,sha256=y0bk2YbzK6J2e0J_dyreN6nD7yM2IezT6m_tU2h-M pip/_vendor/msgpack/_version.py,sha256=dN7wVIjbyuQIJ35B2o6gymQNDLPlj_7-uTfgCv7KErM,20 pip/_vendor/msgpack/exceptions.py,sha256=lPkAi_u12NlFajDz4FELSHEdfU8hrR3zeTvKX8aQuz4,1056 
pip/_vendor/msgpack/fallback.py,sha256=h0ll8xnq12mI9PuQ9Qd_Ihtt08Sp8L0JqhG9KY8Vyjk,36411 -pip/_vendor/packaging/__about__.py,sha256=keVv6YdCEOUPM4CfpYUjc_mYRH0kiV1Yd_t2kcw4Fck,720 +pip/_vendor/packaging/__about__.py,sha256=mH-sMIEu48PzdYakZ6Y6OBzL3TlSetzz1fQSkCXiy30,720 pip/_vendor/packaging/__init__.py,sha256=_vNac5TrzwsrzbOFIbF-5cHqc_Y2aPT2D7zrIR06BOo,513 pip/_vendor/packaging/_compat.py,sha256=Vi_A0rAQeHbU-a9X0tt1yQm9RqkgQbDSxzRw8WlU9kA,860 pip/_vendor/packaging/_structures.py,sha256=DCpKtb7u94_oqgVsIJQTrTyZcb3Gz7sSGbk9vYDMME0,1418 pip/_vendor/packaging/markers.py,sha256=ftZegBU5oEmulEKApDGEPgti2lYIchFQHAfH9tZy3_U,8221 -pip/_vendor/packaging/requirements.py,sha256=GIeW8Qx6DmjvbsumBHPUtn4JrWtHYUyLtNKLAsVmUF8,4441 -pip/_vendor/packaging/specifiers.py,sha256=cDlvKb4hRNwZ72VKE7KP9QbNOmqDPIvRrUFOwhz1e3Q,28026 +pip/_vendor/packaging/requirements.py,sha256=xIWdoZXVKhUHxqFP5xmnKylM7NHXQS48hUfIIX1PvY0,4439 +pip/_vendor/packaging/specifiers.py,sha256=pFp716eLYBRt0eLNsy6cnWD9dyMKq-Zag7bsLbLv4Fs,28026 pip/_vendor/packaging/utils.py,sha256=c9obOpok2CpKDApkc2M5ma0YFnT-jtt4I6XI4F0jYiI,1580 pip/_vendor/packaging/version.py,sha256=MKL8nbKLPLGPouIwFvwSVnYRzNpkMo5AIcsa6LGqDF8,12219 -pip/_vendor/pkg_resources/__init__.py,sha256=eyzgYXSzEXhTkYxCorE8r8yu4F5UhIaD8t73eyUiX_U,103750 -pip/_vendor/pkg_resources/py31compat.py,sha256=-ysVqoxLetAnL94uM0kHkomKQTC1JZLN2ZUjqUhMeKE,600 +pip/_vendor/pep517/__init__.py,sha256=GH4HshnLERtjAjkY0zHoz3f7-35UcIvr27iFWSOUazU,82 +pip/_vendor/pep517/_in_process.py,sha256=iWpagFk2GhNBbvl-Ca2RagfD0ALuits4WWSM6nQMTdg,5831 +pip/_vendor/pep517/check.py,sha256=Yp2NHW71DIOCgkFb7HKJOzKmsum_s_OokRP6HnR3bTg,5761 +pip/_vendor/pep517/colorlog.py,sha256=2AJuPI_DHM5T9IDgcTwf0E8suyHAFnfsesogr0AB7RQ,4048 +pip/_vendor/pep517/compat.py,sha256=4SFG4QN-cNj8ebSa0wV0HUtEEQWwmbok2a0uk1gYEOM,631 +pip/_vendor/pep517/envbuild.py,sha256=osRsJVd7hir1w_uFXiVeeWxfJ3iYhwxsKRgNBWpqtCI,5672 +pip/_vendor/pep517/wrappers.py,sha256=RhgWm-MLxpYPgc9cZ3-A3ToN99ZzgM8-ia4FDB58koM,5018 +pip/_vendor/pkg_resources/__init__.py,sha256=ykZI7-YBIAQ7ztWf0RskP8Oy1VQU88o-16PJbIMCtLg,103915 +pip/_vendor/pkg_resources/py31compat.py,sha256=CRk8fkiPRDLsbi5pZcKsHI__Pbmh_94L8mr9Qy9Ab2U,562 pip/_vendor/progress/__init__.py,sha256=Hv3Y8Hr6RyM34NdZkrZQWMURjS2h5sONRHJSvZXWZgQ,3188 pip/_vendor/progress/bar.py,sha256=hlkDAEv9pRRiWqR5XL6vIAgMG4u_dBGEW_8klQhBRq0,2942 pip/_vendor/progress/counter.py,sha256=XtBuZY4yYmr50E2A_fAzjWhm0IkwaVwxNsNVYDE7nsw,1528 @@ -226,9 +242,9 @@ pip/_vendor/progress/helpers.py,sha256=6FsBLh_xUlKiVua-zZIutCjxth-IO8FtyUj6I2tx9 pip/_vendor/progress/spinner.py,sha256=m7bASI2GUbLFG-PbAefdHtrrWWlJLFhhSBbw70gp2TY,1439 pip/_vendor/pytoml/__init__.py,sha256=q12Xv23Tta44gtK4HGK68Gr4tKfciILidFPmPuoIqIo,92 pip/_vendor/pytoml/core.py,sha256=9CrLLTs1PdWjEwRnYzt_i4dhHcZvGxs_GsMlYAX3iY4,509 -pip/_vendor/pytoml/parser.py,sha256=DSFFTgP0RhBE989f3cTlTvJP6pRCROYMHYBXYg-TZNk,11239 +pip/_vendor/pytoml/parser.py,sha256=mcTzHB2GQGyK8KVwuQ0EraSz_78O36U60NqHBtgVmV0,11247 pip/_vendor/pytoml/writer.py,sha256=-mSOVGaiGLrpj5BRR7czmquZXJGflcElHrwAd33J48A,3815 -pip/_vendor/requests/__init__.py,sha256=o2ykqAY7qT5lS007T6tDksb5uBB12oov39xXZ-E88vQ,4203 +pip/_vendor/requests/__init__.py,sha256=OrwNk1JwZGqIQ4JVGgMbfpstqey-oHS_Re_Dw6D4ciI,4209 pip/_vendor/requests/__version__.py,sha256=rJ2xgNOLhjspGkNPfgXTBctqqvsf2uJMFTaE0rlVtbI,436 pip/_vendor/requests/_internal_utils.py,sha256=Zx3PnEUccyfsB-ie11nZVAW8qClJy0gx1qNME7rgT18,1096 pip/_vendor/requests/adapters.py,sha256=y5DISepvSsGlu3II_VUsdgKBej1dGY4b5beRrTE2tsI,21428 @@ -287,24 +303,26 @@ 
pip/_vendor/webencodings/labels.py,sha256=4AO_KxTddqGtrL9ns7kAPjb0CcN6xsCIxbK37H pip/_vendor/webencodings/mklabels.py,sha256=GYIeywnpaLnP0GSic8LFWgd0UVvO_l1Nc6YoF-87R_4,1305 pip/_vendor/webencodings/tests.py,sha256=OtGLyjhNY1fvkW1GvLJ_FV9ZoqC9Anyjr7q3kxTbzNs,6563 pip/_vendor/webencodings/x_user_defined.py,sha256=yOqWSdmpytGfUgh_Z6JYgDNhoc-BAHyyeeT15Fr42tM,4307 -pip-18.0.dist-info/LICENSE.txt,sha256=ORqHhOMZ2uVDFHfUzJvFBPxdcf2eieHIDxzThV9dfPo,1090 -pip-18.0.dist-info/METADATA,sha256=2D-DQBbJs1zVe4qxVESZ4cxB8gEWzsrbMQDAamJSZeQ,2538 -pip-18.0.dist-info/RECORD,, -pip-18.0.dist-info/WHEEL,sha256=gduuPyBvFJQSQ0zdyxF7k0zynDXbIbvg5ZBHoXum5uk,110 -pip-18.0.dist-info/entry_points.txt,sha256=S_zfxY25QtQDVY1BiLAmOKSkkI5llzCKPLiYOSEupsY,98 -pip-18.0.dist-info/top_level.txt,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pip-18.1.dist-info/LICENSE.txt,sha256=ORqHhOMZ2uVDFHfUzJvFBPxdcf2eieHIDxzThV9dfPo,1090 +pip-18.1.dist-info/METADATA,sha256=D7pqBJTuqM9w_HTW91a0XGjLT9vynlBAE4pPCt_W_UE,2588 +pip-18.1.dist-info/WHEEL,sha256=8T8fxefr_r-A79qbOJ9d_AaEgkpCGmEPHc-gpCq5BRg,110 +pip-18.1.dist-info/entry_points.txt,sha256=S_zfxY25QtQDVY1BiLAmOKSkkI5llzCKPLiYOSEupsY,98 +pip-18.1.dist-info/top_level.txt,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pip-18.1.dist-info/RECORD,, ../../../bin/pip,sha256=dyGpKEvLojgvXjnThiu5fDiLrFcyS-SjIQezTA8rGGE,244 ../../../bin/pip3,sha256=dyGpKEvLojgvXjnThiu5fDiLrFcyS-SjIQezTA8rGGE,244 ../../../bin/pip3.6,sha256=dyGpKEvLojgvXjnThiu5fDiLrFcyS-SjIQezTA8rGGE,244 -pip-18.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pip-18.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 pip/_internal/utils/__pycache__/logging.cpython-36.pyc,, pip/_internal/utils/__pycache__/appdirs.cpython-36.pyc,, pip/_internal/utils/__pycache__/deprecation.cpython-36.pyc,, pip/_internal/utils/__pycache__/filesystem.cpython-36.pyc,, pip/_internal/utils/__pycache__/glibc.cpython-36.pyc,, +pip/_internal/utils/__pycache__/models.cpython-36.pyc,, pip/_internal/utils/__pycache__/outdated.cpython-36.pyc,, pip/_internal/utils/__pycache__/encoding.cpython-36.pyc,, pip/_internal/utils/__pycache__/hashes.cpython-36.pyc,, +pip/_internal/utils/__pycache__/compat.cpython-36.pyc,, pip/_internal/utils/__pycache__/typing.cpython-36.pyc,, pip/_internal/utils/__pycache__/packaging.cpython-36.pyc,, pip/_internal/utils/__pycache__/temp_dir.cpython-36.pyc,, @@ -313,23 +331,29 @@ pip/_internal/utils/__pycache__/setuptools_build.cpython-36.pyc,, pip/_internal/utils/__pycache__/__init__.cpython-36.pyc,, pip/_internal/utils/__pycache__/misc.cpython-36.pyc,, pip/_internal/models/__pycache__/index.cpython-36.pyc,, +pip/_internal/models/__pycache__/candidate.cpython-36.pyc,, +pip/_internal/models/__pycache__/format_control.cpython-36.pyc,, pip/_internal/models/__pycache__/__init__.cpython-36.pyc,, +pip/_internal/models/__pycache__/link.cpython-36.pyc,, pip/_internal/__pycache__/build_env.cpython-36.pyc,, pip/_internal/__pycache__/exceptions.cpython-36.pyc,, pip/_internal/__pycache__/wheel.cpython-36.pyc,, -pip/_internal/__pycache__/status_codes.cpython-36.pyc,, pip/_internal/__pycache__/index.cpython-36.pyc,, pip/_internal/__pycache__/download.cpython-36.pyc,, pip/_internal/__pycache__/resolve.cpython-36.pyc,, -pip/_internal/__pycache__/basecommand.cpython-36.pyc,, pip/_internal/__pycache__/locations.cpython-36.pyc,, -pip/_internal/__pycache__/compat.cpython-36.pyc,, pip/_internal/__pycache__/pep425tags.cpython-36.pyc,, pip/_internal/__pycache__/configuration.cpython-36.pyc,, 
pip/_internal/__pycache__/cache.cpython-36.pyc,, pip/_internal/__pycache__/__init__.cpython-36.pyc,, -pip/_internal/__pycache__/baseparser.cpython-36.pyc,, -pip/_internal/__pycache__/cmdoptions.cpython-36.pyc,, +pip/_internal/__pycache__/pyproject.cpython-36.pyc,, +pip/_internal/cli/__pycache__/parser.cpython-36.pyc,, +pip/_internal/cli/__pycache__/status_codes.cpython-36.pyc,, +pip/_internal/cli/__pycache__/autocompletion.cpython-36.pyc,, +pip/_internal/cli/__pycache__/base_command.cpython-36.pyc,, +pip/_internal/cli/__pycache__/main_parser.cpython-36.pyc,, +pip/_internal/cli/__pycache__/__init__.cpython-36.pyc,, +pip/_internal/cli/__pycache__/cmdoptions.cpython-36.pyc,, pip/_internal/operations/__pycache__/prepare.cpython-36.pyc,, pip/_internal/operations/__pycache__/check.cpython-36.pyc,, pip/_internal/operations/__pycache__/freeze.cpython-36.pyc,, @@ -340,6 +364,7 @@ pip/_internal/req/__pycache__/req_uninstall.cpython-36.pyc,, pip/_internal/req/__pycache__/req_install.cpython-36.pyc,, pip/_internal/req/__pycache__/req_file.cpython-36.pyc,, pip/_internal/req/__pycache__/__init__.cpython-36.pyc,, +pip/_internal/req/__pycache__/constructors.cpython-36.pyc,, pip/_internal/vcs/__pycache__/subversion.cpython-36.pyc,, pip/_internal/vcs/__pycache__/git.cpython-36.pyc,, pip/_internal/vcs/__pycache__/__init__.cpython-36.pyc,, @@ -427,6 +452,13 @@ pip/_vendor/progress/__pycache__/counter.cpython-36.pyc,, pip/_vendor/progress/__pycache__/spinner.cpython-36.pyc,, pip/_vendor/progress/__pycache__/helpers.cpython-36.pyc,, pip/_vendor/progress/__pycache__/__init__.cpython-36.pyc,, +pip/_vendor/pep517/__pycache__/_in_process.cpython-36.pyc,, +pip/_vendor/pep517/__pycache__/check.cpython-36.pyc,, +pip/_vendor/pep517/__pycache__/envbuild.cpython-36.pyc,, +pip/_vendor/pep517/__pycache__/compat.cpython-36.pyc,, +pip/_vendor/pep517/__pycache__/wrappers.cpython-36.pyc,, +pip/_vendor/pep517/__pycache__/__init__.cpython-36.pyc,, +pip/_vendor/pep517/__pycache__/colorlog.cpython-36.pyc,, pip/_vendor/distlib/_backport/__pycache__/shutil.cpython-36.pyc,, pip/_vendor/distlib/_backport/__pycache__/sysconfig.cpython-36.pyc,, pip/_vendor/distlib/_backport/__pycache__/__init__.cpython-36.pyc,, diff --git a/thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/WHEEL b/thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/WHEEL new file mode 100644 index 0000000..1001235 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.32.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/thesisenv/lib/python3.6/site-packages/pip-18.0.dist-info/entry_points.txt b/thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/entry_points.txt similarity index 100% rename from thesisenv/lib/python3.6/site-packages/pip-18.0.dist-info/entry_points.txt rename to thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/entry_points.txt diff --git a/thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/top_level.txt b/thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/top_level.txt new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/pip-18.1.dist-info/top_level.txt @@ -0,0 +1 @@ +pip diff --git a/thesisenv/lib/python3.6/site-packages/pip/__init__.py b/thesisenv/lib/python3.6/site-packages/pip/__init__.py index 9227d0e..ae265fa 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/__init__.py +++ b/thesisenv/lib/python3.6/site-packages/pip/__init__.py @@ -1 +1 @@ 
-__version__ = "18.0" +__version__ = "18.1" diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/__init__.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/__init__.py index 250ed1d..276124d 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/__init__.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/__init__.py @@ -4,7 +4,6 @@ from __future__ import absolute_import import locale import logging import os -import optparse import warnings import sys @@ -38,17 +37,12 @@ else: else: securetransport.inject_into_urllib3() -from pip import __version__ -from pip._internal import cmdoptions -from pip._internal.exceptions import CommandError, PipError -from pip._internal.utils.misc import get_installed_distributions, get_prog +from pip._internal.cli.autocompletion import autocomplete +from pip._internal.cli.main_parser import parse_command +from pip._internal.commands import commands_dict +from pip._internal.exceptions import PipError from pip._internal.utils import deprecation from pip._internal.vcs import git, mercurial, subversion, bazaar # noqa -from pip._internal.baseparser import ( - ConfigOptionParser, UpdatingDefaultsHelpFormatter, -) -from pip._internal.commands import get_summaries, get_similar_commands -from pip._internal.commands import commands_dict from pip._vendor.urllib3.exceptions import InsecureRequestWarning logger = logging.getLogger(__name__) @@ -57,232 +51,6 @@ logger = logging.getLogger(__name__) warnings.filterwarnings("ignore", category=InsecureRequestWarning) -def autocomplete(): - """Command and option completion for the main option parser (and options) - and its subcommands (and options). - - Enable by sourcing one of the completion shell scripts (bash, zsh or fish). - """ - # Don't complete if user hasn't sourced bash_completion file. 
- if 'PIP_AUTO_COMPLETE' not in os.environ: - return - cwords = os.environ['COMP_WORDS'].split()[1:] - cword = int(os.environ['COMP_CWORD']) - try: - current = cwords[cword - 1] - except IndexError: - current = '' - - subcommands = [cmd for cmd, summary in get_summaries()] - options = [] - # subcommand - try: - subcommand_name = [w for w in cwords if w in subcommands][0] - except IndexError: - subcommand_name = None - - parser = create_main_parser() - # subcommand options - if subcommand_name: - # special case: 'help' subcommand has no options - if subcommand_name == 'help': - sys.exit(1) - # special case: list locally installed dists for show and uninstall - should_list_installed = ( - subcommand_name in ['show', 'uninstall'] and - not current.startswith('-') - ) - if should_list_installed: - installed = [] - lc = current.lower() - for dist in get_installed_distributions(local_only=True): - if dist.key.startswith(lc) and dist.key not in cwords[1:]: - installed.append(dist.key) - # if there are no dists installed, fall back to option completion - if installed: - for dist in installed: - print(dist) - sys.exit(1) - - subcommand = commands_dict[subcommand_name]() - - for opt in subcommand.parser.option_list_all: - if opt.help != optparse.SUPPRESS_HELP: - for opt_str in opt._long_opts + opt._short_opts: - options.append((opt_str, opt.nargs)) - - # filter out previously specified options from available options - prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]] - options = [(x, v) for (x, v) in options if x not in prev_opts] - # filter options by current input - options = [(k, v) for k, v in options if k.startswith(current)] - # get completion type given cwords and available subcommand options - completion_type = get_path_completion_type( - cwords, cword, subcommand.parser.option_list_all, - ) - # get completion files and directories if ``completion_type`` is - # ````, ```` or ```` - if completion_type: - options = auto_complete_paths(current, completion_type) - options = ((opt, 0) for opt in options) - for option in options: - opt_label = option[0] - # append '=' to options which require args - if option[1] and option[0][:2] == "--": - opt_label += '=' - print(opt_label) - else: - # show main parser options only when necessary - - opts = [i.option_list for i in parser.option_groups] - opts.append(parser.option_list) - opts = (o for it in opts for o in it) - if current.startswith('-'): - for opt in opts: - if opt.help != optparse.SUPPRESS_HELP: - subcommands += opt._long_opts + opt._short_opts - else: - # get completion type given cwords and all available options - completion_type = get_path_completion_type(cwords, cword, opts) - if completion_type: - subcommands = auto_complete_paths(current, completion_type) - - print(' '.join([x for x in subcommands if x.startswith(current)])) - sys.exit(1) - - -def get_path_completion_type(cwords, cword, opts): - """Get the type of path completion (``file``, ``dir``, ``path`` or None) - - :param cwords: same as the environmental variable ``COMP_WORDS`` - :param cword: same as the environmental variable ``COMP_CWORD`` - :param opts: The available options to check - :return: path completion type (``file``, ``dir``, ``path`` or None) - """ - if cword < 2 or not cwords[cword - 2].startswith('-'): - return - for opt in opts: - if opt.help == optparse.SUPPRESS_HELP: - continue - for o in str(opt).split('/'): - if cwords[cword - 2].split('=')[0] == o: - if any(x in ('path', 'file', 'dir') - for x in opt.metavar.split('/')): - return opt.metavar - - -def 
auto_complete_paths(current, completion_type): - """If ``completion_type`` is ``file`` or ``path``, list all regular files - and directories starting with ``current``; otherwise only list directories - starting with ``current``. - - :param current: The word to be completed - :param completion_type: path completion type(`file`, `path` or `dir`)i - :return: A generator of regular files and/or directories - """ - directory, filename = os.path.split(current) - current_path = os.path.abspath(directory) - # Don't complete paths if they can't be accessed - if not os.access(current_path, os.R_OK): - return - filename = os.path.normcase(filename) - # list all files that start with ``filename`` - file_list = (x for x in os.listdir(current_path) - if os.path.normcase(x).startswith(filename)) - for f in file_list: - opt = os.path.join(current_path, f) - comp_file = os.path.normcase(os.path.join(directory, f)) - # complete regular files when there is not ```` after option - # complete directories when there is ````, ```` or - # ````after option - if completion_type != 'dir' and os.path.isfile(opt): - yield comp_file - elif os.path.isdir(opt): - yield os.path.join(comp_file, '') - - -def create_main_parser(): - parser_kw = { - 'usage': '\n%prog [options]', - 'add_help_option': False, - 'formatter': UpdatingDefaultsHelpFormatter(), - 'name': 'global', - 'prog': get_prog(), - } - - parser = ConfigOptionParser(**parser_kw) - parser.disable_interspersed_args() - - pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - parser.version = 'pip %s from %s (python %s)' % ( - __version__, pip_pkg_dir, sys.version[:3], - ) - - # add the general options - gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser) - parser.add_option_group(gen_opts) - - parser.main = True # so the help formatter knows - - # create command listing for description - command_summaries = get_summaries() - description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries] - parser.description = '\n'.join(description) - - return parser - - -def parseopts(args): - parser = create_main_parser() - - # Note: parser calls disable_interspersed_args(), so the result of this - # call is to split the initial args into the general options before the - # subcommand and everything else. 
- # For example: - # args: ['--timeout=5', 'install', '--user', 'INITools'] - # general_options: ['--timeout==5'] - # args_else: ['install', '--user', 'INITools'] - general_options, args_else = parser.parse_args(args) - - # --version - if general_options.version: - sys.stdout.write(parser.version) - sys.stdout.write(os.linesep) - sys.exit() - - # pip || pip help -> print_help() - if not args_else or (args_else[0] == 'help' and len(args_else) == 1): - parser.print_help() - sys.exit() - - # the subcommand name - cmd_name = args_else[0] - - if cmd_name not in commands_dict: - guess = get_similar_commands(cmd_name) - - msg = ['unknown command "%s"' % cmd_name] - if guess: - msg.append('maybe you meant "%s"' % guess) - - raise CommandError(' - '.join(msg)) - - # all the args without the subcommand - cmd_args = args[:] - cmd_args.remove(cmd_name) - - return cmd_name, cmd_args - - -def check_isolated(args): - isolated = False - - if "--isolated" in args: - isolated = True - - return isolated - - def main(args=None): if args is None: args = sys.argv[1:] @@ -293,7 +61,7 @@ def main(args=None): autocomplete() try: - cmd_name, cmd_args = parseopts(args) + cmd_name, cmd_args = parse_command(args) except PipError as exc: sys.stderr.write("ERROR: %s" % exc) sys.stderr.write(os.linesep) @@ -306,5 +74,5 @@ def main(args=None): except locale.Error as e: # setlocale can apparently crash if locale are uninitialized logger.debug("Ignoring error %s when setting locale", e) - command = commands_dict[cmd_name](isolated=check_isolated(cmd_args)) + command = commands_dict[cmd_name](isolated=("--isolated" in cmd_args)) return command.main(cmd_args) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/build_env.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/build_env.py index f225f76..673409d 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/build_env.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/build_env.py @@ -7,6 +7,8 @@ import sys from distutils.sysconfig import get_python_lib from sysconfig import get_paths +from pip._vendor.pkg_resources import Requirement, VersionConflict, WorkingSet + from pip._internal.utils.misc import call_subprocess from pip._internal.utils.temp_dir import TempDirectory from pip._internal.utils.ui import open_spinner @@ -75,6 +77,20 @@ class BuildEnvironment(object): def cleanup(self): self._temp_dir.cleanup() + def missing_requirements(self, reqs): + """Return a list of the requirements from reqs that are not present + """ + missing = [] + with self: + ws = WorkingSet(os.environ["PYTHONPATH"].split(os.pathsep)) + for req in reqs: + try: + if ws.find(Requirement.parse(req)) is None: + missing.append(req) + except VersionConflict: + missing.append(req) + return missing + def install_requirements(self, finder, requirements, message): args = [ sys.executable, '-m', 'pip', 'install', '--ignore-installed', diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/cache.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/cache.py index 1aa17aa..33bec97 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/cache.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/cache.py @@ -8,9 +8,9 @@ import os from pip._vendor.packaging.utils import canonicalize_name -from pip._internal import index -from pip._internal.compat import expanduser from pip._internal.download import path_to_url +from pip._internal.models.link import Link +from pip._internal.utils.compat import expanduser from pip._internal.utils.temp_dir import 
TempDirectory from pip._internal.wheel import InvalidWheelFilename, Wheel @@ -22,7 +22,7 @@ class Cache(object): :param cache_dir: The root of the cache. - :param format_control: A pip.index.FormatControl object to limit + :param format_control: An object of FormatControl class to limit binaries being read from the cache. :param allowed_formats: which formats of files the cache should store. ('binary' and 'source' are the only allowed values) @@ -72,8 +72,8 @@ class Cache(object): return [] canonical_name = canonicalize_name(package_name) - formats = index.fmt_ctl_formats( - self.format_control, canonical_name + formats = self.format_control.get_allowed_formats( + canonical_name ) if not self.allowed_formats.intersection(formats): return [] @@ -101,7 +101,7 @@ class Cache(object): root = self.get_path_for_link(link) path = os.path.join(root, candidate) - return index.Link(path_to_url(path)) + return Link(path_to_url(path)) def cleanup(self): pass diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/cli/__init__.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/cli/__init__.py new file mode 100644 index 0000000..e589bb9 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/cli/__init__.py @@ -0,0 +1,4 @@ +"""Subpackage containing all of pip's command line interface related code +""" + +# This file intentionally does not import submodules diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/cli/autocompletion.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/cli/autocompletion.py new file mode 100644 index 0000000..0a04199 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/cli/autocompletion.py @@ -0,0 +1,152 @@ +"""Logic that powers autocompletion installed by ``pip completion``. +""" + +import optparse +import os +import sys + +from pip._internal.cli.main_parser import create_main_parser +from pip._internal.commands import commands_dict, get_summaries +from pip._internal.utils.misc import get_installed_distributions + + +def autocomplete(): + """Entry Point for completion of main and subcommand options. + """ + # Don't complete if user hasn't sourced bash_completion file. 
+ if 'PIP_AUTO_COMPLETE' not in os.environ: + return + cwords = os.environ['COMP_WORDS'].split()[1:] + cword = int(os.environ['COMP_CWORD']) + try: + current = cwords[cword - 1] + except IndexError: + current = '' + + subcommands = [cmd for cmd, summary in get_summaries()] + options = [] + # subcommand + try: + subcommand_name = [w for w in cwords if w in subcommands][0] + except IndexError: + subcommand_name = None + + parser = create_main_parser() + # subcommand options + if subcommand_name: + # special case: 'help' subcommand has no options + if subcommand_name == 'help': + sys.exit(1) + # special case: list locally installed dists for show and uninstall + should_list_installed = ( + subcommand_name in ['show', 'uninstall'] and + not current.startswith('-') + ) + if should_list_installed: + installed = [] + lc = current.lower() + for dist in get_installed_distributions(local_only=True): + if dist.key.startswith(lc) and dist.key not in cwords[1:]: + installed.append(dist.key) + # if there are no dists installed, fall back to option completion + if installed: + for dist in installed: + print(dist) + sys.exit(1) + + subcommand = commands_dict[subcommand_name]() + + for opt in subcommand.parser.option_list_all: + if opt.help != optparse.SUPPRESS_HELP: + for opt_str in opt._long_opts + opt._short_opts: + options.append((opt_str, opt.nargs)) + + # filter out previously specified options from available options + prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]] + options = [(x, v) for (x, v) in options if x not in prev_opts] + # filter options by current input + options = [(k, v) for k, v in options if k.startswith(current)] + # get completion type given cwords and available subcommand options + completion_type = get_path_completion_type( + cwords, cword, subcommand.parser.option_list_all, + ) + # get completion files and directories if ``completion_type`` is + # ````, ```` or ```` + if completion_type: + options = auto_complete_paths(current, completion_type) + options = ((opt, 0) for opt in options) + for option in options: + opt_label = option[0] + # append '=' to options which require args + if option[1] and option[0][:2] == "--": + opt_label += '=' + print(opt_label) + else: + # show main parser options only when necessary + + opts = [i.option_list for i in parser.option_groups] + opts.append(parser.option_list) + opts = (o for it in opts for o in it) + if current.startswith('-'): + for opt in opts: + if opt.help != optparse.SUPPRESS_HELP: + subcommands += opt._long_opts + opt._short_opts + else: + # get completion type given cwords and all available options + completion_type = get_path_completion_type(cwords, cword, opts) + if completion_type: + subcommands = auto_complete_paths(current, completion_type) + + print(' '.join([x for x in subcommands if x.startswith(current)])) + sys.exit(1) + + +def get_path_completion_type(cwords, cword, opts): + """Get the type of path completion (``file``, ``dir``, ``path`` or None) + + :param cwords: same as the environmental variable ``COMP_WORDS`` + :param cword: same as the environmental variable ``COMP_CWORD`` + :param opts: The available options to check + :return: path completion type (``file``, ``dir``, ``path`` or None) + """ + if cword < 2 or not cwords[cword - 2].startswith('-'): + return + for opt in opts: + if opt.help == optparse.SUPPRESS_HELP: + continue + for o in str(opt).split('/'): + if cwords[cword - 2].split('=')[0] == o: + if not opt.metavar or any( + x in ('path', 'file', 'dir') + for x in opt.metavar.split('/')): + return 
opt.metavar + + +def auto_complete_paths(current, completion_type): + """If ``completion_type`` is ``file`` or ``path``, list all regular files + and directories starting with ``current``; otherwise only list directories + starting with ``current``. + + :param current: The word to be completed + :param completion_type: path completion type(`file`, `path` or `dir`)i + :return: A generator of regular files and/or directories + """ + directory, filename = os.path.split(current) + current_path = os.path.abspath(directory) + # Don't complete paths if they can't be accessed + if not os.access(current_path, os.R_OK): + return + filename = os.path.normcase(filename) + # list all files that start with ``filename`` + file_list = (x for x in os.listdir(current_path) + if os.path.normcase(x).startswith(filename)) + for f in file_list: + opt = os.path.join(current_path, f) + comp_file = os.path.normcase(os.path.join(directory, f)) + # complete regular files when there is not ```` after option + # complete directories when there is ````, ```` or + # ````after option + if completion_type != 'dir' and os.path.isfile(opt): + yield comp_file + elif os.path.isdir(opt): + yield os.path.join(comp_file, '') diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/basecommand.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/cli/base_command.py similarity index 93% rename from thesisenv/lib/python3.6/site-packages/pip/_internal/basecommand.py rename to thesisenv/lib/python3.6/site-packages/pip/_internal/cli/base_command.py index f0fe37e..dac4b05 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/basecommand.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/cli/base_command.py @@ -7,10 +7,14 @@ import optparse import os import sys -from pip._internal import cmdoptions -from pip._internal.baseparser import ( +from pip._internal.cli import cmdoptions +from pip._internal.cli.parser import ( ConfigOptionParser, UpdatingDefaultsHelpFormatter, ) +from pip._internal.cli.status_codes import ( + ERROR, PREVIOUS_BUILD_DIR_ERROR, SUCCESS, UNKNOWN_ERROR, + VIRTUALENV_NOT_FOUND, +) from pip._internal.download import PipSession from pip._internal.exceptions import ( BadCommand, CommandError, InstallationError, PreviousBuildDirError, @@ -18,12 +22,10 @@ from pip._internal.exceptions import ( ) from pip._internal.index import PackageFinder from pip._internal.locations import running_under_virtualenv -from pip._internal.req.req_file import parse_requirements -from pip._internal.req.req_install import InstallRequirement -from pip._internal.status_codes import ( - ERROR, PREVIOUS_BUILD_DIR_ERROR, SUCCESS, UNKNOWN_ERROR, - VIRTUALENV_NOT_FOUND, +from pip._internal.req.constructors import ( + install_req_from_editable, install_req_from_line, ) +from pip._internal.req.req_file import parse_requirements from pip._internal.utils.logging import setup_logging from pip._internal.utils.misc import get_prog, normalize_path from pip._internal.utils.outdated import pip_version_check @@ -168,12 +170,14 @@ class Command(object): return UNKNOWN_ERROR finally: - # Check if we're using the latest version of pip available - skip_version_check = ( - options.disable_pip_version_check or - getattr(options, "no_index", False) + allow_version_check = ( + # Does this command have the index_group options? + hasattr(options, "no_index") and + # Is this command allowed to perform this check? 
+ not (options.disable_pip_version_check or options.no_index) ) - if not skip_version_check: + # Check if we're using the latest version of pip available + if allow_version_check: session = self._build_session( options, retries=0, @@ -208,7 +212,7 @@ class RequirementCommand(Command): requirement_set.add_requirement(req_to_add) for req in args: - req_to_add = InstallRequirement.from_line( + req_to_add = install_req_from_line( req, None, isolated=options.isolated_mode, wheel_cache=wheel_cache ) @@ -216,7 +220,7 @@ class RequirementCommand(Command): requirement_set.add_requirement(req_to_add) for req in options.editables: - req_to_add = InstallRequirement.from_editable( + req_to_add = install_req_from_editable( req, isolated=options.isolated_mode, wheel_cache=wheel_cache diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/cmdoptions.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/cli/cmdoptions.py similarity index 81% rename from thesisenv/lib/python3.6/site-packages/pip/_internal/cmdoptions.py rename to thesisenv/lib/python3.6/site-packages/pip/_internal/cli/cmdoptions.py index d9c8dd7..3033cd4 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/cmdoptions.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/cli/cmdoptions.py @@ -13,10 +13,9 @@ import warnings from functools import partial from optparse import SUPPRESS_HELP, Option, OptionGroup -from pip._internal.index import ( - FormatControl, fmt_ctl_handle_mutual_exclude, fmt_ctl_no_binary, -) +from pip._internal.exceptions import CommandError from pip._internal.locations import USER_CACHE_DIR, src_prefix +from pip._internal.models.format_control import FormatControl from pip._internal.models.index import PyPI from pip._internal.utils.hashes import STRONG_HASHES from pip._internal.utils.typing import MYPY_CHECK_RUNNING @@ -53,13 +52,52 @@ def check_install_build_global(options, check_options=None): names = ["build_options", "global_options", "install_options"] if any(map(getname, names)): control = options.format_control - fmt_ctl_no_binary(control) + control.disallow_binaries() warnings.warn( 'Disabling all use of wheels due to the use of --build-options ' '/ --global-options / --install-options.', stacklevel=2, ) +def check_dist_restriction(options, check_target=False): + """Function for determining if custom platform options are allowed. + + :param options: The OptionParser options. + :param check_target: Whether or not to check if --target is being used. + """ + dist_restriction_set = any([ + options.python_version, + options.platform, + options.abi, + options.implementation, + ]) + + binary_only = FormatControl(set(), {':all:'}) + sdist_dependencies_allowed = ( + options.format_control != binary_only and + not options.ignore_dependencies + ) + + # Installations or downloads using dist restrictions must not combine + # source distributions and dist-specific wheels, as they are not + # gauranteed to be locally compatible. + if dist_restriction_set and sdist_dependencies_allowed: + raise CommandError( + "When restricting platform and interpreter constraints using " + "--python-version, --platform, --abi, or --implementation, " + "either --no-deps must be set, or --only-binary=:all: must be " + "set and --no-binary must not be set (or must be set to " + ":none:)." 
+ ) + + if check_target: + if dist_restriction_set and not options.target_dir: + raise CommandError( + "Can not use any platform or abi specific options unless " + "installing via '--target'" + ) + + ########### # options # ########### @@ -365,24 +403,25 @@ def _get_format_control(values, option): def _handle_no_binary(option, opt_str, value, parser): - existing = getattr(parser.values, option.dest) - fmt_ctl_handle_mutual_exclude( + existing = _get_format_control(parser.values, option) + FormatControl.handle_mutual_excludes( value, existing.no_binary, existing.only_binary, ) def _handle_only_binary(option, opt_str, value, parser): - existing = getattr(parser.values, option.dest) - fmt_ctl_handle_mutual_exclude( + existing = _get_format_control(parser.values, option) + FormatControl.handle_mutual_excludes( value, existing.only_binary, existing.no_binary, ) def no_binary(): + format_control = FormatControl(set(), set()) return Option( "--no-binary", dest="format_control", action="callback", callback=_handle_no_binary, type="str", - default=FormatControl(set(), set()), + default=format_control, help="Do not use binary packages. Can be supplied multiple times, and " "each time adds to the existing value. Accepts either :all: to " "disable all binary packages, :none: to empty the set, or one or " @@ -393,10 +432,11 @@ def no_binary(): def only_binary(): + format_control = FormatControl(set(), set()) return Option( "--only-binary", dest="format_control", action="callback", callback=_handle_only_binary, type="str", - default=FormatControl(set(), set()), + default=format_control, help="Do not use source packages. Can be supplied multiple times, and " "each time adds to the existing value. Accepts either :all: to " "disable all source packages, :none: to empty the set, or one or " @@ -406,6 +446,61 @@ def only_binary(): ) +platform = partial( + Option, + '--platform', + dest='platform', + metavar='platform', + default=None, + help=("Only use wheels compatible with . " + "Defaults to the platform of the running system."), +) + + +python_version = partial( + Option, + '--python-version', + dest='python_version', + metavar='python_version', + default=None, + help=("Only use wheels compatible with Python " + "interpreter version . If not specified, then the " + "current system interpreter minor version is used. A major " + "version (e.g. '2') can be specified to match all " + "minor revs of that major version. A minor version " + "(e.g. '34') can also be specified."), +) + + +implementation = partial( + Option, + '--implementation', + dest='implementation', + metavar='implementation', + default=None, + help=("Only use wheels compatible with Python " + "implementation , e.g. 'pp', 'jy', 'cp', " + " or 'ip'. If not specified, then the current " + "interpreter implementation is used. Use 'py' to force " + "implementation-agnostic wheels."), +) + + +abi = partial( + Option, + '--abi', + dest='abi', + metavar='abi', + default=None, + help=("Only use wheels compatible with Python " + "abi , e.g. 'pypy_41'. If not specified, then the " + "current interpreter abi tag is used. Generally " + "you will need to specify --implementation, " + "--platform, and --python-version when using " + "this option."), +) + + def prefer_binary(): return Option( "--prefer-binary", @@ -501,7 +596,7 @@ no_clean = partial( '--no-clean', action='store_true', default=False, - help="Don't clean up build directories)." + help="Don't clean up build directories." 
) # type: Any pre = partial( diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/cli/main_parser.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/cli/main_parser.py new file mode 100644 index 0000000..1774a6b --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/cli/main_parser.py @@ -0,0 +1,96 @@ +"""A single place for constructing and exposing the main parser +""" + +import os +import sys + +from pip import __version__ +from pip._internal.cli import cmdoptions +from pip._internal.cli.parser import ( + ConfigOptionParser, UpdatingDefaultsHelpFormatter, +) +from pip._internal.commands import ( + commands_dict, get_similar_commands, get_summaries, +) +from pip._internal.exceptions import CommandError +from pip._internal.utils.misc import get_prog + +__all__ = ["create_main_parser", "parse_command"] + + +def create_main_parser(): + """Creates and returns the main parser for pip's CLI + """ + + parser_kw = { + 'usage': '\n%prog [options]', + 'add_help_option': False, + 'formatter': UpdatingDefaultsHelpFormatter(), + 'name': 'global', + 'prog': get_prog(), + } + + parser = ConfigOptionParser(**parser_kw) + parser.disable_interspersed_args() + + pip_pkg_dir = os.path.abspath(os.path.join( + os.path.dirname(__file__), "..", "..", + )) + parser.version = 'pip %s from %s (python %s)' % ( + __version__, pip_pkg_dir, sys.version[:3], + ) + + # add the general options + gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser) + parser.add_option_group(gen_opts) + + parser.main = True # so the help formatter knows + + # create command listing for description + command_summaries = get_summaries() + description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries] + parser.description = '\n'.join(description) + + return parser + + +def parse_command(args): + parser = create_main_parser() + + # Note: parser calls disable_interspersed_args(), so the result of this + # call is to split the initial args into the general options before the + # subcommand and everything else. 
+ # For example: + # args: ['--timeout=5', 'install', '--user', 'INITools'] + # general_options: ['--timeout==5'] + # args_else: ['install', '--user', 'INITools'] + general_options, args_else = parser.parse_args(args) + + # --version + if general_options.version: + sys.stdout.write(parser.version) + sys.stdout.write(os.linesep) + sys.exit() + + # pip || pip help -> print_help() + if not args_else or (args_else[0] == 'help' and len(args_else) == 1): + parser.print_help() + sys.exit() + + # the subcommand name + cmd_name = args_else[0] + + if cmd_name not in commands_dict: + guess = get_similar_commands(cmd_name) + + msg = ['unknown command "%s"' % cmd_name] + if guess: + msg.append('maybe you meant "%s"' % guess) + + raise CommandError(' - '.join(msg)) + + # all the args without the subcommand + cmd_args = args[:] + cmd_args.remove(cmd_name) + + return cmd_name, cmd_args diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/baseparser.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/cli/parser.py similarity index 89% rename from thesisenv/lib/python3.6/site-packages/pip/_internal/baseparser.py rename to thesisenv/lib/python3.6/site-packages/pip/_internal/cli/parser.py index 9a8d129..e1eaac4 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/baseparser.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/cli/parser.py @@ -9,8 +9,9 @@ from distutils.util import strtobool from pip._vendor.six import string_types -from pip._internal.compat import get_terminal_size +from pip._internal.cli.status_codes import UNKNOWN_ERROR from pip._internal.configuration import Configuration, ConfigurationError +from pip._internal.utils.compat import get_terminal_size logger = logging.getLogger(__name__) @@ -192,7 +193,14 @@ class ConfigOptionParser(CustomOptionParser): continue if option.action in ('store_true', 'store_false', 'count'): - val = strtobool(val) + try: + val = strtobool(val) + except ValueError: + error_msg = invalid_config_error_message( + option.action, key, val + ) + self.error(error_msg) + elif option.action == 'append': val = val.split() val = [self.check_default(option, key, v) for v in val] @@ -225,7 +233,7 @@ class ConfigOptionParser(CustomOptionParser): try: self.config.load() except ConfigurationError as err: - self.exit(2, err.args[0]) + self.exit(UNKNOWN_ERROR, str(err)) defaults = self._update_defaults(self.defaults.copy()) # ours for option in self._get_all_options(): @@ -237,4 +245,17 @@ class ConfigOptionParser(CustomOptionParser): def error(self, msg): self.print_usage(sys.stderr) - self.exit(2, "%s\n" % msg) + self.exit(UNKNOWN_ERROR, "%s\n" % msg) + + +def invalid_config_error_message(action, key, val): + """Returns a better error message when invalid configuration option + is provided.""" + if action in ('store_true', 'store_false'): + return ("{0} is not a valid value for {1} option, " + "please specify a boolean value like yes/no, " + "true/false or 1/0 instead.").format(val, key) + + return ("{0} is not a valid value for {1} option, " + "please specify a numerical value like 1/0 " + "instead.").format(val, key) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/status_codes.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/cli/status_codes.py similarity index 100% rename from thesisenv/lib/python3.6/site-packages/pip/_internal/status_codes.py rename to thesisenv/lib/python3.6/site-packages/pip/_internal/cli/status_codes.py diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/__init__.py 
b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/__init__.py index 77a300a..c7d1da3 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/__init__.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/__init__.py @@ -21,7 +21,7 @@ from pip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: from typing import List, Type # noqa: F401 - from pip._internal.basecommand import Command # noqa: F401 + from pip._internal.cli.base_command import Command # noqa: F401 commands_order = [ InstallCommand, diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/check.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/check.py index e667075..1be3ec2 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/check.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/check.py @@ -1,6 +1,6 @@ import logging -from pip._internal.basecommand import Command +from pip._internal.cli.base_command import Command from pip._internal.operations.check import ( check_package_set, create_package_set_from_installed, ) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/completion.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/completion.py index c4b3873..2fcdd39 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/completion.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/completion.py @@ -3,7 +3,7 @@ from __future__ import absolute_import import sys import textwrap -from pip._internal.basecommand import Command +from pip._internal.cli.base_command import Command from pip._internal.utils.misc import get_prog BASE_COMPLETION = """ diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/configuration.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/configuration.py index 57448cb..826c08d 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/configuration.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/configuration.py @@ -2,11 +2,11 @@ import logging import os import subprocess -from pip._internal.basecommand import Command +from pip._internal.cli.base_command import Command +from pip._internal.cli.status_codes import ERROR, SUCCESS from pip._internal.configuration import Configuration, kinds from pip._internal.exceptions import PipError from pip._internal.locations import venv_config_file -from pip._internal.status_codes import ERROR, SUCCESS from pip._internal.utils.misc import get_prog logger = logging.getLogger(__name__) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/download.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/download.py index cf4827c..b3f3c6e 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/download.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/download.py @@ -3,10 +3,8 @@ from __future__ import absolute_import import logging import os -from pip._internal import cmdoptions -from pip._internal.basecommand import RequirementCommand -from pip._internal.exceptions import CommandError -from pip._internal.index import FormatControl +from pip._internal.cli import cmdoptions +from pip._internal.cli.base_command import RequirementCommand from pip._internal.operations.prepare import RequirementPreparer from pip._internal.req import RequirementSet from pip._internal.req.req_tracker import RequirementTracker @@ -69,52 +67,10 @@ class 
DownloadCommand(RequirementCommand): help=("Download packages into ."), ) - cmd_opts.add_option( - '--platform', - dest='platform', - metavar='platform', - default=None, - help=("Only download wheels compatible with . " - "Defaults to the platform of the running system."), - ) - - cmd_opts.add_option( - '--python-version', - dest='python_version', - metavar='python_version', - default=None, - help=("Only download wheels compatible with Python " - "interpreter version . If not specified, then the " - "current system interpreter minor version is used. A major " - "version (e.g. '2') can be specified to match all " - "minor revs of that major version. A minor version " - "(e.g. '34') can also be specified."), - ) - - cmd_opts.add_option( - '--implementation', - dest='implementation', - metavar='implementation', - default=None, - help=("Only download wheels compatible with Python " - "implementation , e.g. 'pp', 'jy', 'cp', " - " or 'ip'. If not specified, then the current " - "interpreter implementation is used. Use 'py' to force " - "implementation-agnostic wheels."), - ) - - cmd_opts.add_option( - '--abi', - dest='abi', - metavar='abi', - default=None, - help=("Only download wheels compatible with Python " - "abi , e.g. 'pypy_41'. If not specified, then the " - "current interpreter abi tag is used. Generally " - "you will need to specify --implementation, " - "--platform, and --python-version when using " - "this option."), - ) + cmd_opts.add_option(cmdoptions.platform()) + cmd_opts.add_option(cmdoptions.python_version()) + cmd_opts.add_option(cmdoptions.implementation()) + cmd_opts.add_option(cmdoptions.abi()) index_opts = cmdoptions.make_option_group( cmdoptions.index_group, @@ -135,25 +91,7 @@ class DownloadCommand(RequirementCommand): else: python_versions = None - dist_restriction_set = any([ - options.python_version, - options.platform, - options.abi, - options.implementation, - ]) - binary_only = FormatControl(set(), {':all:'}) - no_sdist_dependencies = ( - options.format_control != binary_only and - not options.ignore_dependencies - ) - if dist_restriction_set and no_sdist_dependencies: - raise CommandError( - "When restricting platform and interpreter constraints using " - "--python-version, --platform, --abi, or --implementation, " - "either --no-deps must be set, or --only-binary=:all: must be " - "set and --no-binary must not be set (or must be set to " - ":none:)." 
- ) + cmdoptions.check_dist_restriction(options) options.src_dir = os.path.abspath(options.src_dir) options.download_dir = normalize_path(options.download_dir) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/freeze.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/freeze.py index 0d3d4ae..dc9c53a 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/freeze.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/freeze.py @@ -2,11 +2,11 @@ from __future__ import absolute_import import sys -from pip._internal import index -from pip._internal.basecommand import Command from pip._internal.cache import WheelCache -from pip._internal.compat import stdlib_pkgs +from pip._internal.cli.base_command import Command +from pip._internal.models.format_control import FormatControl from pip._internal.operations.freeze import freeze +from pip._internal.utils.compat import stdlib_pkgs DEV_PKGS = {'pip', 'setuptools', 'distribute', 'wheel'} @@ -71,7 +71,7 @@ class FreezeCommand(Command): self.parser.insert_option_group(0, self.cmd_opts) def run(self, options, args): - format_control = index.FormatControl(set(), set()) + format_control = FormatControl(set(), set()) wheel_cache = WheelCache(options.cache_dir, format_control) skip = set(stdlib_pkgs) if not options.freeze_all: diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/hash.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/hash.py index 95353b0..423440e 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/hash.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/hash.py @@ -4,8 +4,8 @@ import hashlib import logging import sys -from pip._internal.basecommand import Command -from pip._internal.status_codes import ERROR +from pip._internal.cli.base_command import Command +from pip._internal.cli.status_codes import ERROR from pip._internal.utils.hashes import FAVORITE_HASH, STRONG_HASHES from pip._internal.utils.misc import read_chunks diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/help.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/help.py index 06ca2c1..49a81cb 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/help.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/help.py @@ -1,6 +1,7 @@ from __future__ import absolute_import -from pip._internal.basecommand import SUCCESS, Command +from pip._internal.cli.base_command import Command +from pip._internal.cli.status_codes import SUCCESS from pip._internal.exceptions import CommandError diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/install.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/install.py index f42a1d1..6fc178f 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/install.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/install.py @@ -9,9 +9,10 @@ from optparse import SUPPRESS_HELP from pip._vendor import pkg_resources -from pip._internal import cmdoptions -from pip._internal.basecommand import RequirementCommand from pip._internal.cache import WheelCache +from pip._internal.cli import cmdoptions +from pip._internal.cli.base_command import RequirementCommand +from pip._internal.cli.status_codes import ERROR from pip._internal.exceptions import ( CommandError, InstallationError, PreviousBuildDirError, ) @@ -21,7 +22,6 @@ from pip._internal.operations.prepare import 
RequirementPreparer from pip._internal.req import RequirementSet, install_given_reqs from pip._internal.req.req_tracker import RequirementTracker from pip._internal.resolve import Resolver -from pip._internal.status_codes import ERROR from pip._internal.utils.filesystem import check_path_owner from pip._internal.utils.misc import ( ensure_dir, get_installed_version, @@ -83,6 +83,11 @@ class InstallCommand(RequirementCommand): '. Use --upgrade to replace existing packages in ' 'with new versions.' ) + cmd_opts.add_option(cmdoptions.platform()) + cmd_opts.add_option(cmdoptions.python_version()) + cmd_opts.add_option(cmdoptions.implementation()) + cmd_opts.add_option(cmdoptions.abi()) + cmd_opts.add_option( '--user', dest='use_user_site', @@ -204,7 +209,6 @@ class InstallCommand(RequirementCommand): def run(self, options, args): cmdoptions.check_install_build_global(options) - upgrade_strategy = "to-satisfy-only" if options.upgrade: upgrade_strategy = options.upgrade_strategy @@ -212,6 +216,13 @@ class InstallCommand(RequirementCommand): if options.build_dir: options.build_dir = os.path.abspath(options.build_dir) + cmdoptions.check_dist_restriction(options, check_target=True) + + if options.python_version: + python_versions = [options.python_version] + else: + python_versions = None + options.src_dir = os.path.abspath(options.src_dir) install_options = options.install_options or [] if options.use_user_site: @@ -246,7 +257,14 @@ class InstallCommand(RequirementCommand): global_options = options.global_options or [] with self._build_session(options) as session: - finder = self._build_package_finder(options, session) + finder = self._build_package_finder( + options=options, + session=session, + platform=options.platform, + python_versions=python_versions, + abi=options.abi, + implementation=options.implementation, + ) build_delete = (not (options.no_clean or options.build_dir)) wheel_cache = WheelCache(options.cache_dir, options.format_control) @@ -266,6 +284,7 @@ class InstallCommand(RequirementCommand): ) as directory: requirement_set = RequirementSet( require_hashes=options.require_hashes, + check_supported_wheels=not options.target_dir, ) try: diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/list.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/list.py index 53fb015..c6eeca7 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/list.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/list.py @@ -6,8 +6,8 @@ import logging from pip._vendor import six from pip._vendor.six.moves import zip_longest -from pip._internal.basecommand import Command -from pip._internal.cmdoptions import index_group, make_option_group +from pip._internal.cli import cmdoptions +from pip._internal.cli.base_command import Command from pip._internal.exceptions import CommandError from pip._internal.index import PackageFinder from pip._internal.utils.misc import ( @@ -102,7 +102,9 @@ class ListCommand(Command): help='Include editable package from output.', default=True, ) - index_opts = make_option_group(index_group, self.parser) + index_opts = cmdoptions.make_option_group( + cmdoptions.index_group, self.parser + ) self.parser.insert_option_group(0, index_opts) self.parser.insert_option_group(0, cmd_opts) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/search.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/search.py index a47a9a7..c157a31 100644 --- 
a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/search.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/search.py @@ -11,12 +11,12 @@ from pip._vendor.packaging.version import parse as parse_version # why we ignore the type on this import from pip._vendor.six.moves import xmlrpc_client # type: ignore -from pip._internal.basecommand import SUCCESS, Command -from pip._internal.compat import get_terminal_size +from pip._internal.cli.base_command import Command +from pip._internal.cli.status_codes import NO_MATCHES_FOUND, SUCCESS from pip._internal.download import PipXmlrpcTransport from pip._internal.exceptions import CommandError from pip._internal.models.index import PyPI -from pip._internal.status_codes import NO_MATCHES_FOUND +from pip._internal.utils.compat import get_terminal_size from pip._internal.utils.logging import indent_log logger = logging.getLogger(__name__) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/show.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/show.py index 4136133..f92c9bc 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/show.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/show.py @@ -7,8 +7,8 @@ from email.parser import FeedParser # type: ignore from pip._vendor import pkg_resources from pip._vendor.packaging.utils import canonicalize_name -from pip._internal.basecommand import Command -from pip._internal.status_codes import ERROR, SUCCESS +from pip._internal.cli.base_command import Command +from pip._internal.cli.status_codes import ERROR, SUCCESS logger = logging.getLogger(__name__) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/uninstall.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/uninstall.py index ba5a2f5..0cd6f54 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/uninstall.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/uninstall.py @@ -2,9 +2,10 @@ from __future__ import absolute_import from pip._vendor.packaging.utils import canonicalize_name -from pip._internal.basecommand import Command +from pip._internal.cli.base_command import Command from pip._internal.exceptions import InstallationError -from pip._internal.req import InstallRequirement, parse_requirements +from pip._internal.req import parse_requirements +from pip._internal.req.constructors import install_req_from_line from pip._internal.utils.misc import protect_pip_from_modification_on_windows @@ -47,7 +48,7 @@ class UninstallCommand(Command): with self._build_session(options) as session: reqs_to_uninstall = {} for name in args: - req = InstallRequirement.from_line( + req = install_req_from_line( name, isolated=options.isolated_mode, ) if req.name: diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/wheel.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/wheel.py index 4189387..9c1f149 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/wheel.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/commands/wheel.py @@ -4,9 +4,9 @@ from __future__ import absolute_import import logging import os -from pip._internal import cmdoptions -from pip._internal.basecommand import RequirementCommand from pip._internal.cache import WheelCache +from pip._internal.cli import cmdoptions +from pip._internal.cli.base_command import RequirementCommand from pip._internal.exceptions import CommandError, PreviousBuildDirError from 
pip._internal.operations.prepare import RequirementPreparer from pip._internal.req import RequirementSet diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/configuration.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/configuration.py index 32133de..fe6df9b 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/configuration.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/configuration.py @@ -18,7 +18,9 @@ import os from pip._vendor import six from pip._vendor.six.moves import configparser -from pip._internal.exceptions import ConfigurationError +from pip._internal.exceptions import ( + ConfigurationError, ConfigurationFileCouldNotBeLoaded, +) from pip._internal.locations import ( legacy_config_file, new_config_file, running_under_virtualenv, site_config_files, venv_config_file, @@ -289,11 +291,16 @@ class Configuration(object): try: parser.read(fname) except UnicodeDecodeError: - raise ConfigurationError(( - "ERROR: " - "Configuration file contains invalid %s characters.\n" - "Please fix your configuration, located at %s\n" - ) % (locale.getpreferredencoding(False), fname)) + # See https://github.com/pypa/pip/issues/4963 + raise ConfigurationFileCouldNotBeLoaded( + reason="contains invalid {} characters".format( + locale.getpreferredencoding(False) + ), + fname=fname, + ) + except configparser.Error as error: + # See https://github.com/pypa/pip/issues/4893 + raise ConfigurationFileCouldNotBeLoaded(error=error) return parser def _load_environment_vars(self): diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/exceptions.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/exceptions.py index ad6f412..f1ca6f3 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/exceptions.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/exceptions.py @@ -247,3 +247,22 @@ class HashMismatch(HashError): class UnsupportedPythonVersion(InstallationError): """Unsupported python version according to Requires-Python package metadata.""" + + +class ConfigurationFileCouldNotBeLoaded(ConfigurationError): + """When there are errors while loading a configuration file + """ + + def __init__(self, reason="could not be loaded", fname=None, error=None): + super(ConfigurationFileCouldNotBeLoaded, self).__init__(error) + self.reason = reason + self.fname = fname + self.error = error + + def __str__(self): + if self.fname is not None: + message_part = " in {}.".format(self.fname) + else: + assert self.error is not None + message_part = ".\n{}\n".format(self.error.message) + return "Configuration file {}{}".format(self.reason, message_part) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/index.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/index.py index 8c0ec82..8c2f24f 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/index.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/index.py @@ -20,24 +20,27 @@ from pip._vendor.requests.exceptions import SSLError from pip._vendor.six.moves.urllib import parse as urllib_parse from pip._vendor.six.moves.urllib import request as urllib_request -from pip._internal.compat import ipaddress from pip._internal.download import HAS_TLS, is_url, path_to_url, url_to_path from pip._internal.exceptions import ( BestVersionAlreadyInstalled, DistributionNotFound, InvalidWheelFilename, UnsupportedWheel, ) +from pip._internal.models.candidate import InstallationCandidate +from pip._internal.models.format_control import FormatControl from pip._internal.models.index 
import PyPI +from pip._internal.models.link import Link from pip._internal.pep425tags import get_supported +from pip._internal.utils.compat import ipaddress from pip._internal.utils.deprecation import deprecated from pip._internal.utils.logging import indent_log from pip._internal.utils.misc import ( - ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS, cached_property, normalize_path, - remove_auth_from_url, splitext, + ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS, normalize_path, + remove_auth_from_url, ) from pip._internal.utils.packaging import check_requires_python from pip._internal.wheel import Wheel, wheel_ext -__all__ = ['FormatControl', 'fmt_ctl_handle_mutual_exclude', 'PackageFinder'] +__all__ = ['FormatControl', 'PackageFinder'] SECURE_ORIGINS = [ @@ -56,45 +59,120 @@ SECURE_ORIGINS = [ logger = logging.getLogger(__name__) -class InstallationCandidate(object): +def _get_content_type(url, session): + """Get the Content-Type of the given url, using a HEAD request""" + scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url) + if scheme not in {'http', 'https'}: + # FIXME: some warning or something? + # assertion error? + return '' - def __init__(self, project, version, location): - self.project = project - self.version = parse_version(version) - self.location = location - self._key = (self.project, self.version, self.location) + resp = session.head(url, allow_redirects=True) + resp.raise_for_status() - def __repr__(self): - return "".format( - self.project, self.version, self.location, + return resp.headers.get("Content-Type", "") + + +def _handle_get_page_fail(link, reason, url, meth=None): + if meth is None: + meth = logger.debug + meth("Could not fetch URL %s: %s - skipping", link, reason) + + +def _get_html_page(link, session=None): + if session is None: + raise TypeError( + "_get_html_page() missing 1 required keyword argument: 'session'" ) - def __hash__(self): - return hash(self._key) + url = link.url + url = url.split('#', 1)[0] - def __lt__(self, other): - return self._compare(other, lambda s, o: s < o) + # Check for VCS schemes that do not support lookup as web pages. 
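    # (Editor's illustrative note, not part of the vendored pip patch: this
    # guard skips links such as "git+https://example.org/repo.git#egg=pkg"
    # or "svn+ssh://host/path", whose schemes identify a VCS checkout rather
    # than an HTML index page that could be fetched and parsed here.)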
+ from pip._internal.vcs import VcsSupport + for scheme in VcsSupport.schemes: + if url.lower().startswith(scheme) and url[len(scheme)] in '+:': + logger.debug('Cannot look at %s URL %s', scheme, link) + return None - def __le__(self, other): - return self._compare(other, lambda s, o: s <= o) + try: + filename = link.filename + for bad_ext in ARCHIVE_EXTENSIONS: + if filename.endswith(bad_ext): + content_type = _get_content_type(url, session=session) + if content_type.lower().startswith('text/html'): + break + else: + logger.debug( + 'Skipping page %s because of Content-Type: %s', + link, + content_type, + ) + return - def __eq__(self, other): - return self._compare(other, lambda s, o: s == o) + logger.debug('Getting page %s', url) - def __ge__(self, other): - return self._compare(other, lambda s, o: s >= o) + # Tack index.html onto file:// URLs that point to directories + (scheme, netloc, path, params, query, fragment) = \ + urllib_parse.urlparse(url) + if (scheme == 'file' and + os.path.isdir(urllib_request.url2pathname(path))): + # add trailing slash if not present so urljoin doesn't trim + # final segment + if not url.endswith('/'): + url += '/' + url = urllib_parse.urljoin(url, 'index.html') + logger.debug(' file: URL is directory, getting %s', url) - def __gt__(self, other): - return self._compare(other, lambda s, o: s > o) + resp = session.get( + url, + headers={ + "Accept": "text/html", + # We don't want to blindly returned cached data for + # /simple/, because authors generally expecting that + # twine upload && pip install will function, but if + # they've done a pip install in the last ~10 minutes + # it won't. Thus by setting this to zero we will not + # blindly use any cached data, however the benefit of + # using max-age=0 instead of no-cache, is that we will + # still support conditional requests, so we will still + # minimize traffic sent in cases where the page hasn't + # changed at all, we will just always incur the round + # trip for the conditional GET now instead of only + # once per 10 minutes. + # For more information, please see pypa/pip#5670. + "Cache-Control": "max-age=0", + }, + ) + resp.raise_for_status() - def __ne__(self, other): - return self._compare(other, lambda s, o: s != o) + # The check for archives above only works if the url ends with + # something that looks like an archive. However that is not a + # requirement of an url. Unless we issue a HEAD request on every + # url we cannot know ahead of time for sure if something is HTML + # or not. However we can check after we've downloaded it. 
+ content_type = resp.headers.get('Content-Type', 'unknown') + if not content_type.lower().startswith("text/html"): + logger.debug( + 'Skipping page %s because of Content-Type: %s', + link, + content_type, + ) + return - def _compare(self, other, method): - if not isinstance(other, InstallationCandidate): - return NotImplemented - - return method(self._key, other._key) + inst = HTMLPage(resp.content, resp.url, resp.headers) + except requests.HTTPError as exc: + _handle_get_page_fail(link, exc, url) + except SSLError as exc: + reason = "There was a problem confirming the ssl certificate: " + reason += str(exc) + _handle_get_page_fail(link, reason, url, meth=logger.info) + except requests.ConnectionError as exc: + _handle_get_page_fail(link, "connection error: %s" % exc, url) + except requests.Timeout: + _handle_get_page_fail(link, "timed out", url) + else: + return inst class PackageFinder(object): @@ -206,15 +284,15 @@ class PackageFinder(object): return "\n".join(lines) def add_dependency_links(self, links): - # # FIXME: this shouldn't be global list this, it should only - # # apply to requirements of the package that specifies the - # # dependency_links value - # # FIXME: also, we should track comes_from (i.e., use Link) + # FIXME: this shouldn't be global list this, it should only + # apply to requirements of the package that specifies the + # dependency_links value + # FIXME: also, we should track comes_from (i.e., use Link) if self.process_dependency_links: deprecated( "Dependency Links processing has been deprecated and will be " "removed in a future release.", - replacement=None, + replacement="PEP 508 URL dependencies", gone_in="18.2", issue=4187, ) @@ -440,7 +518,7 @@ class PackageFinder(object): logger.debug('* %s', location) canonical_name = canonicalize_name(project_name) - formats = fmt_ctl_formats(self.format_control, canonical_name) + formats = self.format_control.get_allowed_formats(canonical_name) search = Search(project_name, canonical_name, formats) find_links_versions = self._package_versions( # We trust every directly linked archive in find_links @@ -453,7 +531,7 @@ class PackageFinder(object): logger.debug('Analyzing links from page %s', page.url) with indent_log(): page_versions.extend( - self._package_versions(page.links, search) + self._package_versions(page.iter_links(), search) ) dependency_versions = self._package_versions( @@ -712,7 +790,7 @@ class PackageFinder(object): return InstallationCandidate(search.supplied, version, link) def _get_page(self, link): - return HTMLPage.get_page(link, session=self.session) + return _get_html_page(link, session=self.session) def egg_info_matches( @@ -732,7 +810,7 @@ def egg_info_matches( return None if search_name is None: full_match = match.group(0) - return full_match[full_match.index('-'):] + return full_match.split('-', 1)[-1] name = match.group(0).lower() # To match the "safe" name that pkg_resources creates: name = name.replace('_', '-') @@ -744,377 +822,71 @@ def egg_info_matches( return None +def _determine_base_url(document, page_url): + """Determine the HTML document's base URL. + + This looks for a ```` tag in the HTML document. If present, its href + attribute denotes the base URL of anchor tags in the document. If there is + no such tag (or if it does not have a valid href attribute), the HTML + file's URL is used as the base URL. + + :param document: An HTML document representation. The current + implementation expects the result of ``html5lib.parse()``. + :param page_url: The URL of the HTML document. 
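    (Editor's illustrative note, not part of the vendored pip patch: if the
    document contains <base href="https://mirror.example/simple/pkg/">, a
    relative anchor such as "pkg-1.0.tar.gz" is later joined against that
    href rather than against page_url.)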
+ """ + for base in document.findall(".//base"): + href = base.get("href") + if href is not None: + return href + return page_url + + +def _get_encoding_from_headers(headers): + """Determine if we have any encoding information in our headers. + """ + if headers and "Content-Type" in headers: + content_type, params = cgi.parse_header(headers["Content-Type"]) + if "charset" in params: + return params['charset'] + return None + + +_CLEAN_LINK_RE = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) + + +def _clean_link(url): + """Makes sure a link is fully encoded. That is, if a ' ' shows up in + the link, it will be rewritten to %20 (while not over-quoting + % or other characters).""" + return _CLEAN_LINK_RE.sub(lambda match: '%%%2x' % ord(match.group(0)), url) + + class HTMLPage(object): """Represents one page, along with its URL""" def __init__(self, content, url, headers=None): - # Determine if we have any encoding information in our headers - encoding = None - if headers and "Content-Type" in headers: - content_type, params = cgi.parse_header(headers["Content-Type"]) - - if "charset" in params: - encoding = params['charset'] - self.content = content - self.parsed = html5lib.parse( - self.content, - transport_encoding=encoding, - namespaceHTMLElements=False, - ) self.url = url self.headers = headers def __str__(self): return self.url - @classmethod - def get_page(cls, link, skip_archives=True, session=None): - if session is None: - raise TypeError( - "get_page() missing 1 required keyword argument: 'session'" - ) - - url = link.url - url = url.split('#', 1)[0] - - # Check for VCS schemes that do not support lookup as web pages. - from pip._internal.vcs import VcsSupport - for scheme in VcsSupport.schemes: - if url.lower().startswith(scheme) and url[len(scheme)] in '+:': - logger.debug('Cannot look at %s URL %s', scheme, link) - return None - - try: - if skip_archives: - filename = link.filename - for bad_ext in ARCHIVE_EXTENSIONS: - if filename.endswith(bad_ext): - content_type = cls._get_content_type( - url, session=session, - ) - if content_type.lower().startswith('text/html'): - break - else: - logger.debug( - 'Skipping page %s because of Content-Type: %s', - link, - content_type, - ) - return - - logger.debug('Getting page %s', url) - - # Tack index.html onto file:// URLs that point to directories - (scheme, netloc, path, params, query, fragment) = \ - urllib_parse.urlparse(url) - if (scheme == 'file' and - os.path.isdir(urllib_request.url2pathname(path))): - # add trailing slash if not present so urljoin doesn't trim - # final segment - if not url.endswith('/'): - url += '/' - url = urllib_parse.urljoin(url, 'index.html') - logger.debug(' file: URL is directory, getting %s', url) - - resp = session.get( - url, - headers={ - "Accept": "text/html", - "Cache-Control": "max-age=600", - }, - ) - resp.raise_for_status() - - # The check for archives above only works if the url ends with - # something that looks like an archive. However that is not a - # requirement of an url. Unless we issue a HEAD request on every - # url we cannot know ahead of time for sure if something is HTML - # or not. However we can check after we've downloaded it. 
- content_type = resp.headers.get('Content-Type', 'unknown') - if not content_type.lower().startswith("text/html"): - logger.debug( - 'Skipping page %s because of Content-Type: %s', - link, - content_type, - ) - return - - inst = cls(resp.content, resp.url, resp.headers) - except requests.HTTPError as exc: - cls._handle_fail(link, exc, url) - except SSLError as exc: - reason = "There was a problem confirming the ssl certificate: " - reason += str(exc) - cls._handle_fail(link, reason, url, meth=logger.info) - except requests.ConnectionError as exc: - cls._handle_fail(link, "connection error: %s" % exc, url) - except requests.Timeout: - cls._handle_fail(link, "timed out", url) - else: - return inst - - @staticmethod - def _handle_fail(link, reason, url, meth=None): - if meth is None: - meth = logger.debug - - meth("Could not fetch URL %s: %s - skipping", link, reason) - - @staticmethod - def _get_content_type(url, session): - """Get the Content-Type of the given url, using a HEAD request""" - scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url) - if scheme not in {'http', 'https'}: - # FIXME: some warning or something? - # assertion error? - return '' - - resp = session.head(url, allow_redirects=True) - resp.raise_for_status() - - return resp.headers.get("Content-Type", "") - - @cached_property - def base_url(self): - bases = [ - x for x in self.parsed.findall(".//base") - if x.get("href") is not None - ] - if bases and bases[0].get("href"): - return bases[0].get("href") - else: - return self.url - - @property - def links(self): + def iter_links(self): """Yields all links in the page""" - for anchor in self.parsed.findall(".//a"): + document = html5lib.parse( + self.content, + transport_encoding=_get_encoding_from_headers(self.headers), + namespaceHTMLElements=False, + ) + base_url = _determine_base_url(document, self.url) + for anchor in document.findall(".//a"): if anchor.get("href"): href = anchor.get("href") - url = self.clean_link( - urllib_parse.urljoin(self.base_url, href) - ) + url = _clean_link(urllib_parse.urljoin(base_url, href)) pyrequire = anchor.get('data-requires-python') pyrequire = unescape(pyrequire) if pyrequire else None - yield Link(url, self, requires_python=pyrequire) - - _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) - - def clean_link(self, url): - """Makes sure a link is fully encoded. That is, if a ' ' shows up in - the link, it will be rewritten to %20 (while not over-quoting - % or other characters).""" - return self._clean_re.sub( - lambda match: '%%%2x' % ord(match.group(0)), url) - - -class Link(object): - - def __init__(self, url, comes_from=None, requires_python=None): - """ - Object representing a parsed link from https://pypi.org/simple/* - - url: - url of the resource pointed to (href of the link) - comes_from: - instance of HTMLPage where the link was found, or string. - requires_python: - String containing the `Requires-Python` metadata field, specified - in PEP 345. This may be specified by a data-requires-python - attribute in the HTML link tag, as described in PEP 503. 
- """ - - # url can be a UNC windows share - if url.startswith('\\\\'): - url = path_to_url(url) - - self.url = url - self.comes_from = comes_from - self.requires_python = requires_python if requires_python else None - - def __str__(self): - if self.requires_python: - rp = ' (requires-python:%s)' % self.requires_python - else: - rp = '' - if self.comes_from: - return '%s (from %s)%s' % (self.url, self.comes_from, rp) - else: - return str(self.url) - - def __repr__(self): - return '' % self - - def __eq__(self, other): - if not isinstance(other, Link): - return NotImplemented - return self.url == other.url - - def __ne__(self, other): - if not isinstance(other, Link): - return NotImplemented - return self.url != other.url - - def __lt__(self, other): - if not isinstance(other, Link): - return NotImplemented - return self.url < other.url - - def __le__(self, other): - if not isinstance(other, Link): - return NotImplemented - return self.url <= other.url - - def __gt__(self, other): - if not isinstance(other, Link): - return NotImplemented - return self.url > other.url - - def __ge__(self, other): - if not isinstance(other, Link): - return NotImplemented - return self.url >= other.url - - def __hash__(self): - return hash(self.url) - - @property - def filename(self): - _, netloc, path, _, _ = urllib_parse.urlsplit(self.url) - name = posixpath.basename(path.rstrip('/')) or netloc - name = urllib_parse.unquote(name) - assert name, ('URL %r produced no filename' % self.url) - return name - - @property - def scheme(self): - return urllib_parse.urlsplit(self.url)[0] - - @property - def netloc(self): - return urllib_parse.urlsplit(self.url)[1] - - @property - def path(self): - return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2]) - - def splitext(self): - return splitext(posixpath.basename(self.path.rstrip('/'))) - - @property - def ext(self): - return self.splitext()[1] - - @property - def url_without_fragment(self): - scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url) - return urllib_parse.urlunsplit((scheme, netloc, path, query, None)) - - _egg_fragment_re = re.compile(r'[#&]egg=([^&]*)') - - @property - def egg_fragment(self): - match = self._egg_fragment_re.search(self.url) - if not match: - return None - return match.group(1) - - _subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)') - - @property - def subdirectory_fragment(self): - match = self._subdirectory_fragment_re.search(self.url) - if not match: - return None - return match.group(1) - - _hash_re = re.compile( - r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)' - ) - - @property - def hash(self): - match = self._hash_re.search(self.url) - if match: - return match.group(2) - return None - - @property - def hash_name(self): - match = self._hash_re.search(self.url) - if match: - return match.group(1) - return None - - @property - def show_url(self): - return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0]) - - @property - def is_wheel(self): - return self.ext == wheel_ext - - @property - def is_artifact(self): - """ - Determines if this points to an actual artifact (e.g. a tarball) or if - it points to an "abstract" thing like a path or a VCS location. - """ - from pip._internal.vcs import vcs - - if self.scheme in vcs.all_schemes: - return False - - return True - - -FormatControl = namedtuple('FormatControl', 'no_binary only_binary') -"""This object has two fields, no_binary and only_binary. - -If a field is falsy, it isn't set. 
If it is {':all:'}, it should match all -packages except those listed in the other field. Only one field can be set -to {':all:'} at a time. The rest of the time exact package name matches -are listed, with any given package only showing up in one field at a time. -""" - - -def fmt_ctl_handle_mutual_exclude(value, target, other): - new = value.split(',') - while ':all:' in new: - other.clear() - target.clear() - target.add(':all:') - del new[:new.index(':all:') + 1] - if ':none:' not in new: - # Without a none, we want to discard everything as :all: covers it - return - for name in new: - if name == ':none:': - target.clear() - continue - name = canonicalize_name(name) - other.discard(name) - target.add(name) - - -def fmt_ctl_formats(fmt_ctl, canonical_name): - result = {"binary", "source"} - if canonical_name in fmt_ctl.only_binary: - result.discard('source') - elif canonical_name in fmt_ctl.no_binary: - result.discard('binary') - elif ':all:' in fmt_ctl.only_binary: - result.discard('source') - elif ':all:' in fmt_ctl.no_binary: - result.discard('binary') - return frozenset(result) - - -def fmt_ctl_no_binary(fmt_ctl): - fmt_ctl_handle_mutual_exclude( - ':all:', fmt_ctl.no_binary, fmt_ctl.only_binary, - ) + yield Link(url, self.url, requires_python=pyrequire) Search = namedtuple('Search', 'supplied canonical formats') diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/locations.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/locations.py index 4a7b61e..183aaa3 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/locations.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/locations.py @@ -10,8 +10,8 @@ import sysconfig from distutils import sysconfig as distutils_sysconfig from distutils.command.install import SCHEME_KEYS # type: ignore -from pip._internal.compat import WINDOWS, expanduser from pip._internal.utils import appdirs +from pip._internal.utils.compat import WINDOWS, expanduser # Application Directories USER_CACHE_DIR = appdirs.user_cache_dir("pip") diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/models/candidate.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/models/candidate.py new file mode 100644 index 0000000..c736de6 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/models/candidate.py @@ -0,0 +1,23 @@ +from pip._vendor.packaging.version import parse as parse_version + +from pip._internal.utils.models import KeyBasedCompareMixin + + +class InstallationCandidate(KeyBasedCompareMixin): + """Represents a potential "candidate" for installation. + """ + + def __init__(self, project, version, location): + self.project = project + self.version = parse_version(version) + self.location = location + + super(InstallationCandidate, self).__init__( + key=(self.project, self.version, self.location), + defining_class=InstallationCandidate + ) + + def __repr__(self): + return "".format( + self.project, self.version, self.location, + ) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/models/format_control.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/models/format_control.py new file mode 100644 index 0000000..2748856 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/models/format_control.py @@ -0,0 +1,62 @@ +from pip._vendor.packaging.utils import canonicalize_name + + +class FormatControl(object): + """A helper class for controlling formats from which packages are installed. + If a field is falsy, it isn't set. 
If it is {':all:'}, it should match all + packages except those listed in the other field. Only one field can be set + to {':all:'} at a time. The rest of the time exact package name matches + are listed, with any given package only showing up in one field at a time. + """ + def __init__(self, no_binary=None, only_binary=None): + self.no_binary = set() if no_binary is None else no_binary + self.only_binary = set() if only_binary is None else only_binary + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not self.__eq__(other) + + def __repr__(self): + return "{}({}, {})".format( + self.__class__.__name__, + self.no_binary, + self.only_binary + ) + + @staticmethod + def handle_mutual_excludes(value, target, other): + new = value.split(',') + while ':all:' in new: + other.clear() + target.clear() + target.add(':all:') + del new[:new.index(':all:') + 1] + # Without a none, we want to discard everything as :all: covers it + if ':none:' not in new: + return + for name in new: + if name == ':none:': + target.clear() + continue + name = canonicalize_name(name) + other.discard(name) + target.add(name) + + def get_allowed_formats(self, canonical_name): + result = {"binary", "source"} + if canonical_name in self.only_binary: + result.discard('source') + elif canonical_name in self.no_binary: + result.discard('binary') + elif ':all:' in self.only_binary: + result.discard('source') + elif ':all:' in self.no_binary: + result.discard('binary') + return frozenset(result) + + def disallow_binaries(self): + self.handle_mutual_excludes( + ':all:', self.no_binary, self.only_binary, + ) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/models/index.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/models/index.py index a7f10c8..870a315 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/models/index.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/models/index.py @@ -1,15 +1,29 @@ from pip._vendor.six.moves.urllib import parse as urllib_parse -class Index(object): - def __init__(self, url): +class PackageIndex(object): + """Represents a Package Index and provides easier access to endpoints + """ + + def __init__(self, url, file_storage_domain): + super(PackageIndex, self).__init__() self.url = url self.netloc = urllib_parse.urlsplit(url).netloc - self.simple_url = self.url_to_path('simple') - self.pypi_url = self.url_to_path('pypi') + self.simple_url = self._url_for_path('simple') + self.pypi_url = self._url_for_path('pypi') - def url_to_path(self, path): + # This is part of a temporary hack used to block installs of PyPI + # packages which depend on external urls only necessary until PyPI can + # block such packages themselves + self.file_storage_domain = file_storage_domain + + def _url_for_path(self, path): return urllib_parse.urljoin(self.url, path) -PyPI = Index('https://pypi.org/') +PyPI = PackageIndex( + 'https://pypi.org/', file_storage_domain='files.pythonhosted.org' +) +TestPyPI = PackageIndex( + 'https://test.pypi.org/', file_storage_domain='test-files.pythonhosted.org' +) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/models/link.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/models/link.py new file mode 100644 index 0000000..5decb7c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/models/link.py @@ -0,0 +1,141 @@ +import posixpath +import re + +from pip._vendor.six.moves.urllib import parse as urllib_parse + +from pip._internal.download import 
path_to_url +from pip._internal.utils.misc import splitext +from pip._internal.utils.models import KeyBasedCompareMixin +from pip._internal.wheel import wheel_ext + + +class Link(KeyBasedCompareMixin): + """Represents a parsed link from a Package Index's simple URL + """ + + def __init__(self, url, comes_from=None, requires_python=None): + """ + url: + url of the resource pointed to (href of the link) + comes_from: + instance of HTMLPage where the link was found, or string. + requires_python: + String containing the `Requires-Python` metadata field, specified + in PEP 345. This may be specified by a data-requires-python + attribute in the HTML link tag, as described in PEP 503. + """ + + # url can be a UNC windows share + if url.startswith('\\\\'): + url = path_to_url(url) + + self.url = url + self.comes_from = comes_from + self.requires_python = requires_python if requires_python else None + + super(Link, self).__init__( + key=(self.url), + defining_class=Link + ) + + def __str__(self): + if self.requires_python: + rp = ' (requires-python:%s)' % self.requires_python + else: + rp = '' + if self.comes_from: + return '%s (from %s)%s' % (self.url, self.comes_from, rp) + else: + return str(self.url) + + def __repr__(self): + return '' % self + + @property + def filename(self): + _, netloc, path, _, _ = urllib_parse.urlsplit(self.url) + name = posixpath.basename(path.rstrip('/')) or netloc + name = urllib_parse.unquote(name) + assert name, ('URL %r produced no filename' % self.url) + return name + + @property + def scheme(self): + return urllib_parse.urlsplit(self.url)[0] + + @property + def netloc(self): + return urllib_parse.urlsplit(self.url)[1] + + @property + def path(self): + return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2]) + + def splitext(self): + return splitext(posixpath.basename(self.path.rstrip('/'))) + + @property + def ext(self): + return self.splitext()[1] + + @property + def url_without_fragment(self): + scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url) + return urllib_parse.urlunsplit((scheme, netloc, path, query, None)) + + _egg_fragment_re = re.compile(r'[#&]egg=([^&]*)') + + @property + def egg_fragment(self): + match = self._egg_fragment_re.search(self.url) + if not match: + return None + return match.group(1) + + _subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)') + + @property + def subdirectory_fragment(self): + match = self._subdirectory_fragment_re.search(self.url) + if not match: + return None + return match.group(1) + + _hash_re = re.compile( + r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)' + ) + + @property + def hash(self): + match = self._hash_re.search(self.url) + if match: + return match.group(2) + return None + + @property + def hash_name(self): + match = self._hash_re.search(self.url) + if match: + return match.group(1) + return None + + @property + def show_url(self): + return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0]) + + @property + def is_wheel(self): + return self.ext == wheel_ext + + @property + def is_artifact(self): + """ + Determines if this points to an actual artifact (e.g. a tarball) or if + it points to an "abstract" thing like a path or a VCS location. 
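        (Editor's illustrative note, not part of the vendored pip patch: a
        link to "https://example.org/pkg-1.0-py3-none-any.whl" is an
        artifact, whereas "git+https://example.org/pkg.git#egg=pkg" is not,
        because its scheme belongs to pip's registered VCS backends.)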
+ """ + from pip._internal.vcs import vcs + + if self.scheme in vcs.all_schemes: + return False + + return True diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/operations/freeze.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/operations/freeze.py index 4bbc27b..beb2feb 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/operations/freeze.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/operations/freeze.py @@ -10,11 +10,13 @@ from pip._vendor.packaging.utils import canonicalize_name from pip._vendor.pkg_resources import RequirementParseError from pip._internal.exceptions import InstallationError -from pip._internal.req import InstallRequirement +from pip._internal.req.constructors import ( + install_req_from_editable, install_req_from_line, +) from pip._internal.req.req_file import COMMENT_RE from pip._internal.utils.deprecation import deprecated from pip._internal.utils.misc import ( - dist_is_editable, get_installed_distributions, + dist_is_editable, get_installed_distributions, make_vcs_requirement_url, ) logger = logging.getLogger(__name__) @@ -99,13 +101,13 @@ def freeze( line = line[2:].strip() else: line = line[len('--editable'):].strip().lstrip('=') - line_req = InstallRequirement.from_editable( + line_req = install_req_from_editable( line, isolated=isolated, wheel_cache=wheel_cache, ) else: - line_req = InstallRequirement.from_line( + line_req = install_req_from_line( COMMENT_RE.sub('', line).strip(), isolated=isolated, wheel_cache=wheel_cache, @@ -166,7 +168,13 @@ class FrozenRequirement(object): _date_re = re.compile(r'-(20\d\d\d\d\d\d)$') @classmethod - def from_dist(cls, dist, dependency_links): + def _init_args_from_dist(cls, dist, dependency_links): + """ + Compute and return arguments (req, editable, comments) to pass to + FrozenRequirement.__init__(). + + This method is for use in FrozenRequirement.from_dist(). 
+ """ location = os.path.normcase(os.path.abspath(dist.location)) comments = [] from pip._internal.vcs import vcs, get_src_requirement @@ -231,12 +239,15 @@ class FrozenRequirement(object): else: rev = '{%s}' % date_match.group(1) editable = True - req = '%s@%s#egg=%s' % ( - svn_location, - rev, - cls.egg_name(dist) - ) - return cls(dist.project_name, req, editable, comments) + egg_name = cls.egg_name(dist) + req = make_vcs_requirement_url(svn_location, rev, egg_name) + + return (req, editable, comments) + + @classmethod + def from_dist(cls, dist, dependency_links): + args = cls._init_args_from_dist(dist, dependency_links) + return cls(dist.project_name, *args) @staticmethod def egg_name(dist): diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/operations/prepare.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/operations/prepare.py index 7740c28..104bea3 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/operations/prepare.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/operations/prepare.py @@ -7,7 +7,6 @@ import os from pip._vendor import pkg_resources, requests from pip._internal.build_env import BuildEnvironment -from pip._internal.compat import expanduser from pip._internal.download import ( is_dir_url, is_file_url, is_vcs_url, unpack_url, url_to_path, ) @@ -15,6 +14,7 @@ from pip._internal.exceptions import ( DirectoryUrlHashUnsupported, HashUnpinned, InstallationError, PreviousBuildDirError, VcsHashUnsupported, ) +from pip._internal.utils.compat import expanduser from pip._internal.utils.hashes import MissingHashes from pip._internal.utils.logging import indent_log from pip._internal.utils.misc import display_path, normalize_path @@ -65,7 +65,7 @@ class DistAbstraction(object): """Return a setuptools Dist object.""" raise NotImplementedError(self.dist) - def prep_for_dist(self, finder): + def prep_for_dist(self, finder, build_isolation): """Ensure that we can get a Dist for this requirement.""" raise NotImplementedError(self.dist) @@ -93,36 +93,36 @@ class IsSDist(DistAbstraction): return dist def prep_for_dist(self, finder, build_isolation): - # Before calling "setup.py egg_info", we need to set-up the build - # environment. - build_requirements = self.req.get_pep_518_info() - should_isolate = build_isolation and build_requirements is not None + # Prepare for building. We need to: + # 1. Load pyproject.toml (if it exists) + # 2. Set up the build environment + + self.req.load_pyproject_toml() + should_isolate = self.req.use_pep517 and build_isolation if should_isolate: - # Haven't implemented PEP 517 yet, so spew a warning about it if - # build-requirements don't include setuptools and wheel. - missing_requirements = {'setuptools', 'wheel'} - { - pkg_resources.Requirement(r).key for r in build_requirements - } - if missing_requirements: + # Isolate in a BuildEnvironment and install the build-time + # requirements. 
+ self.req.build_env = BuildEnvironment() + self.req.build_env.install_requirements( + finder, self.req.pyproject_requires, + "Installing build dependencies" + ) + missing = [] + if self.req.requirements_to_check: + check = self.req.requirements_to_check + missing = self.req.build_env.missing_requirements(check) + if missing: logger.warning( "Missing build requirements in pyproject.toml for %s.", self.req, ) logger.warning( - "This version of pip does not implement PEP 517 so it " - "cannot build a wheel without %s.", - " and ".join(map(repr, sorted(missing_requirements))) + "The project does not specify a build backend, and pip " + "cannot fall back to setuptools without %s.", + " and ".join(map(repr, sorted(missing))) ) - # Isolate in a BuildEnvironment and install the build-time - # requirements. - self.req.build_env = BuildEnvironment() - self.req.build_env.install_requirements( - finder, build_requirements, - "Installing build dependencies" - ) - self.req.run_egg_info() self.req.assert_source_matches_version() @@ -132,7 +132,7 @@ class Installed(DistAbstraction): def dist(self, finder): return self.req.satisfied_by - def prep_for_dist(self, finder): + def prep_for_dist(self, finder, build_isolation): pass diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/pep425tags.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/pep425tags.py index 0b5c783..ab1a029 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/pep425tags.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/pep425tags.py @@ -11,6 +11,7 @@ import warnings from collections import OrderedDict import pip._internal.utils.glibc +from pip._internal.utils.compat import get_extension_suffixes logger = logging.getLogger(__name__) @@ -252,10 +253,9 @@ def get_supported(versions=None, noarch=False, platform=None, abis[0:0] = [abi] abi3s = set() - import imp - for suffix in imp.get_suffixes(): - if suffix[0].startswith('.abi'): - abi3s.add(suffix[0].split('.', 2)[1]) + for suffix in get_extension_suffixes(): + if suffix.startswith('.abi'): + abi3s.add(suffix.split('.', 2)[1]) abis.extend(sorted(list(abi3s))) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/pyproject.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/pyproject.py new file mode 100644 index 0000000..f938a76 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/pyproject.py @@ -0,0 +1,144 @@ +from __future__ import absolute_import + +import io +import os + +from pip._vendor import pytoml, six + +from pip._internal.exceptions import InstallationError + + +def _is_list_of_str(obj): + return ( + isinstance(obj, list) and + all(isinstance(item, six.string_types) for item in obj) + ) + + +def load_pyproject_toml(use_pep517, pyproject_toml, setup_py, req_name): + """Load the pyproject.toml file. + + Parameters: + use_pep517 - Has the user requested PEP 517 processing? None + means the user hasn't explicitly specified. 
+ pyproject_toml - Location of the project's pyproject.toml file + setup_py - Location of the project's setup.py file + req_name - The name of the requirement we're processing (for + error reporting) + + Returns: + None if we should use the legacy code path, otherwise a tuple + ( + requirements from pyproject.toml, + name of PEP 517 backend, + requirements we should check are installed after setting + up the build environment + ) + """ + has_pyproject = os.path.isfile(pyproject_toml) + has_setup = os.path.isfile(setup_py) + + if has_pyproject: + with io.open(pyproject_toml, encoding="utf-8") as f: + pp_toml = pytoml.load(f) + build_system = pp_toml.get("build-system") + else: + build_system = None + + # The following cases must use PEP 517 + # We check for use_pep517 equalling False because that + # means the user explicitly requested --no-use-pep517 + if has_pyproject and not has_setup: + if use_pep517 is False: + raise InstallationError( + "Disabling PEP 517 processing is invalid: " + "project does not have a setup.py" + ) + use_pep517 = True + elif build_system and "build-backend" in build_system: + if use_pep517 is False: + raise InstallationError( + "Disabling PEP 517 processing is invalid: " + "project specifies a build backend of {} " + "in pyproject.toml".format( + build_system["build-backend"] + ) + ) + use_pep517 = True + + # If we haven't worked out whether to use PEP 517 yet, + # and the user hasn't explicitly stated a preference, + # we do so if the project has a pyproject.toml file. + elif use_pep517 is None: + use_pep517 = has_pyproject + + # At this point, we know whether we're going to use PEP 517. + assert use_pep517 is not None + + # If we're using the legacy code path, there is nothing further + # for us to do here. + if not use_pep517: + return None + + if build_system is None: + # Either the user has a pyproject.toml with no build-system + # section, or the user has no pyproject.toml, but has opted in + # explicitly via --use-pep517. + # In the absence of any explicit backend specification, we + # assume the setuptools backend, and require wheel and a version + # of setuptools that supports that backend. + build_system = { + "requires": ["setuptools>=38.2.5", "wheel"], + "build-backend": "setuptools.build_meta", + } + + # If we're using PEP 517, we have build system information (either + # from pyproject.toml, or defaulted by the code above). + # Note that at this point, we do not know if the user has actually + # specified a backend, though. + assert build_system is not None + + # Ensure that the build-system section in pyproject.toml conforms + # to PEP 518. + error_template = ( + "{package} has a pyproject.toml file that does not comply " + "with PEP 518: {reason}" + ) + + # Specifying the build-system table but not the requires key is invalid + if "requires" not in build_system: + raise InstallationError( + error_template.format(package=req_name, reason=( + "it has a 'build-system' table but not " + "'build-system.requires' which is mandatory in the table" + )) + ) + + # Error out if requires is not a list of strings + requires = build_system["requires"] + if not _is_list_of_str(requires): + raise InstallationError(error_template.format( + package=req_name, + reason="'build-system.requires' is not a list of strings.", + )) + + backend = build_system.get("build-backend") + check = [] + if backend is None: + # If the user didn't specify a backend, we assume they want to use + # the setuptools backend. 
But we can't be sure they have included + # a version of setuptools which supplies the backend, or wheel + # (which is neede by the backend) in their requirements. So we + # make a note to check that those requirements are present once + # we have set up the environment. + # TODO: Review this - it's quite a lot of work to check for a very + # specific case. The problem is, that case is potentially quite + # common - projects that adopted PEP 518 early for the ability to + # specify requirements to execute setup.py, but never considered + # needing to mention the build tools themselves. The original PEP + # 518 code had a similar check (but implemented in a different + # way). + backend = "setuptools.build_meta" + check = ["setuptools>=38.2.5", "wheel"] + + return (requires, backend, check) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/req/constructors.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/req/constructors.py new file mode 100644 index 0000000..4c4641d --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/req/constructors.py @@ -0,0 +1,298 @@ +"""Backing implementation for InstallRequirement's various constructors + +The idea here is that these formed a major chunk of InstallRequirement's size +so, moving them and support code dedicated to them outside of that class +helps creates for better understandability for the rest of the code. + +These are meant to be used elsewhere within pip to create instances of +InstallRequirement. +""" + +import logging +import os +import re +import traceback + +from pip._vendor.packaging.markers import Marker +from pip._vendor.packaging.requirements import InvalidRequirement, Requirement +from pip._vendor.packaging.specifiers import Specifier +from pip._vendor.pkg_resources import RequirementParseError, parse_requirements + +from pip._internal.download import ( + is_archive_file, is_url, path_to_url, url_to_path, +) +from pip._internal.exceptions import InstallationError +from pip._internal.models.index import PyPI, TestPyPI +from pip._internal.models.link import Link +from pip._internal.req.req_install import InstallRequirement +from pip._internal.utils.misc import is_installable_dir +from pip._internal.vcs import vcs +from pip._internal.wheel import Wheel + +__all__ = [ + "install_req_from_editable", "install_req_from_line", + "parse_editable" +] + +logger = logging.getLogger(__name__) +operators = Specifier._operators.keys() + + +def _strip_extras(path): + m = re.match(r'^(.+)(\[[^\]]+\])$', path) + extras = None + if m: + path_no_extras = m.group(1) + extras = m.group(2) + else: + path_no_extras = path + + return path_no_extras, extras + + +def parse_editable(editable_req): + """Parses an editable requirement into: + - a requirement name + - an URL + - extras + - editable options + Accepted requirements: + svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir + .[some_extra] + """ + + url = editable_req + + # If a file path is specified with extras, strip off the extras. + url_no_extras, extras = _strip_extras(url) + + if os.path.isdir(url_no_extras): + if not os.path.exists(os.path.join(url_no_extras, 'setup.py')): + raise InstallationError( + "Directory %r is not installable. File 'setup.py' not found." 
% + url_no_extras + ) + # Treating it as code that has already been checked out + url_no_extras = path_to_url(url_no_extras) + + if url_no_extras.lower().startswith('file:'): + package_name = Link(url_no_extras).egg_fragment + if extras: + return ( + package_name, + url_no_extras, + Requirement("placeholder" + extras.lower()).extras, + ) + else: + return package_name, url_no_extras, None + + for version_control in vcs: + if url.lower().startswith('%s:' % version_control): + url = '%s+%s' % (version_control, url) + break + + if '+' not in url: + raise InstallationError( + '%s should either be a path to a local project or a VCS url ' + 'beginning with svn+, git+, hg+, or bzr+' % + editable_req + ) + + vc_type = url.split('+', 1)[0].lower() + + if not vcs.get_backend(vc_type): + error_message = 'For --editable=%s only ' % editable_req + \ + ', '.join([backend.name + '+URL' for backend in vcs.backends]) + \ + ' is currently supported' + raise InstallationError(error_message) + + package_name = Link(url).egg_fragment + if not package_name: + raise InstallationError( + "Could not detect requirement name for '%s', please specify one " + "with #egg=your_package_name" % editable_req + ) + return package_name, url, None + + +def deduce_helpful_msg(req): + """Returns helpful msg in case requirements file does not exist, + or cannot be parsed. + + :params req: Requirements file path + """ + msg = "" + if os.path.exists(req): + msg = " It does exist." + # Try to parse and check if it is a requirements file. + try: + with open(req, 'r') as fp: + # parse first line only + next(parse_requirements(fp.read())) + msg += " The argument you provided " + \ + "(%s) appears to be a" % (req) + \ + " requirements file. If that is the" + \ + " case, use the '-r' flag to install" + \ + " the packages specified within it." + except RequirementParseError: + logger.debug("Cannot parse '%s' as requirements \ + file" % (req), exc_info=1) + else: + msg += " File '%s' does not exist." % (req) + return msg + + +# ---- The actual constructors follow ---- + + +def install_req_from_editable( + editable_req, comes_from=None, isolated=False, options=None, + wheel_cache=None, constraint=False +): + name, url, extras_override = parse_editable(editable_req) + if url.startswith('file:'): + source_dir = url_to_path(url) + else: + source_dir = None + + if name is not None: + try: + req = Requirement(name) + except InvalidRequirement: + raise InstallationError("Invalid requirement: '%s'" % name) + else: + req = None + return InstallRequirement( + req, comes_from, source_dir=source_dir, + editable=True, + link=Link(url), + constraint=constraint, + isolated=isolated, + options=options if options else {}, + wheel_cache=wheel_cache, + extras=extras_override or (), + ) + + +def install_req_from_line( + name, comes_from=None, isolated=False, options=None, wheel_cache=None, + constraint=False +): + """Creates an InstallRequirement from a name, which might be a + requirement, directory containing 'setup.py', filename, or URL. 
+ """ + if is_url(name): + marker_sep = '; ' + else: + marker_sep = ';' + if marker_sep in name: + name, markers = name.split(marker_sep, 1) + markers = markers.strip() + if not markers: + markers = None + else: + markers = Marker(markers) + else: + markers = None + name = name.strip() + req = None + path = os.path.normpath(os.path.abspath(name)) + link = None + extras = None + + if is_url(name): + link = Link(name) + else: + p, extras = _strip_extras(path) + looks_like_dir = os.path.isdir(p) and ( + os.path.sep in name or + (os.path.altsep is not None and os.path.altsep in name) or + name.startswith('.') + ) + if looks_like_dir: + if not is_installable_dir(p): + raise InstallationError( + "Directory %r is not installable. Neither 'setup.py' " + "nor 'pyproject.toml' found." % name + ) + link = Link(path_to_url(p)) + elif is_archive_file(p): + if not os.path.isfile(p): + logger.warning( + 'Requirement %r looks like a filename, but the ' + 'file does not exist', + name + ) + link = Link(path_to_url(p)) + + # it's a local file, dir, or url + if link: + # Handle relative file URLs + if link.scheme == 'file' and re.search(r'\.\./', link.url): + link = Link( + path_to_url(os.path.normpath(os.path.abspath(link.path)))) + # wheel file + if link.is_wheel: + wheel = Wheel(link.filename) # can raise InvalidWheelFilename + req = "%s==%s" % (wheel.name, wheel.version) + else: + # set the req to the egg fragment. when it's not there, this + # will become an 'unnamed' requirement + req = link.egg_fragment + + # a requirement specifier + else: + req = name + + if extras: + extras = Requirement("placeholder" + extras.lower()).extras + else: + extras = () + if req is not None: + try: + req = Requirement(req) + except InvalidRequirement: + if os.path.sep in req: + add_msg = "It looks like a path." + add_msg += deduce_helpful_msg(req) + elif '=' in req and not any(op in req for op in operators): + add_msg = "= is not a valid operator. Did you mean == ?" 
+ else: + add_msg = traceback.format_exc() + raise InstallationError( + "Invalid requirement: '%s'\n%s" % (req, add_msg) + ) + + return InstallRequirement( + req, comes_from, link=link, markers=markers, + isolated=isolated, + options=options if options else {}, + wheel_cache=wheel_cache, + constraint=constraint, + extras=extras, + ) + + +def install_req_from_req( + req, comes_from=None, isolated=False, wheel_cache=None +): + try: + req = Requirement(req) + except InvalidRequirement: + raise InstallationError("Invalid requirement: '%s'" % req) + + domains_not_allowed = [ + PyPI.file_storage_domain, + TestPyPI.file_storage_domain, + ] + if req.url and comes_from.link.netloc in domains_not_allowed: + # Explicitly disallow pypi packages that depend on external urls + raise InstallationError( + "Packages installed from PyPI cannot depend on packages " + "which are not also hosted on PyPI.\n" + "%s depends on %s " % (comes_from.name, req) + ) + + return InstallRequirement( + req, comes_from, isolated=isolated, wheel_cache=wheel_cache + ) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/req/req_file.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/req/req_file.py index f868497..e7acf7c 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/req/req_file.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/req/req_file.py @@ -13,10 +13,12 @@ import sys from pip._vendor.six.moves import filterfalse from pip._vendor.six.moves.urllib import parse as urllib_parse -from pip._internal import cmdoptions +from pip._internal.cli import cmdoptions from pip._internal.download import get_file_content from pip._internal.exceptions import RequirementsFileParseError -from pip._internal.req.req_install import InstallRequirement +from pip._internal.req.constructors import ( + install_req_from_editable, install_req_from_line, +) __all__ = ['parse_requirements'] @@ -151,7 +153,7 @@ def process_line(line, filename, line_number, finder=None, comes_from=None, for dest in SUPPORTED_OPTIONS_REQ_DEST: if dest in opts.__dict__ and opts.__dict__[dest]: req_options[dest] = opts.__dict__[dest] - yield InstallRequirement.from_line( + yield install_req_from_line( args_str, line_comes_from, constraint=constraint, isolated=isolated, options=req_options, wheel_cache=wheel_cache ) @@ -159,7 +161,7 @@ def process_line(line, filename, line_number, finder=None, comes_from=None, # yield an editable requirement elif opts.editables: isolated = options.isolated_mode if options else False - yield InstallRequirement.from_editable( + yield install_req_from_editable( opts.editables[0], comes_from=line_comes_from, constraint=constraint, isolated=isolated, wheel_cache=wheel_cache ) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/req/req_install.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/req/req_install.py index 462c80a..c2624fe 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/req/req_install.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/req/req_install.py @@ -1,66 +1,46 @@ from __future__ import absolute_import -import io import logging import os -import re import shutil import sys import sysconfig -import traceback import zipfile from distutils.util import change_root -from email.parser import FeedParser # type: ignore -from pip._vendor import pkg_resources, pytoml, six -from pip._vendor.packaging import specifiers -from pip._vendor.packaging.markers import Marker -from pip._vendor.packaging.requirements import InvalidRequirement, Requirement 
+from pip._vendor import pkg_resources, six +from pip._vendor.packaging.requirements import Requirement from pip._vendor.packaging.utils import canonicalize_name from pip._vendor.packaging.version import Version from pip._vendor.packaging.version import parse as parse_version -from pip._vendor.pkg_resources import RequirementParseError, parse_requirements +from pip._vendor.pep517.wrappers import Pep517HookCaller from pip._internal import wheel from pip._internal.build_env import NoOpBuildEnvironment -from pip._internal.compat import native_str -from pip._internal.download import ( - is_archive_file, is_url, path_to_url, url_to_path, -) from pip._internal.exceptions import InstallationError from pip._internal.locations import ( PIP_DELETE_MARKER_FILENAME, running_under_virtualenv, ) +from pip._internal.models.link import Link +from pip._internal.pyproject import load_pyproject_toml from pip._internal.req.req_uninstall import UninstallPathSet +from pip._internal.utils.compat import native_str from pip._internal.utils.hashes import Hashes from pip._internal.utils.logging import indent_log from pip._internal.utils.misc import ( _make_build_dir, ask_path_exists, backup_dir, call_subprocess, display_path, dist_in_site_packages, dist_in_usersite, ensure_dir, - get_installed_version, is_installable_dir, read_text_file, rmtree, + get_installed_version, rmtree, ) +from pip._internal.utils.packaging import get_metadata from pip._internal.utils.setuptools_build import SETUPTOOLS_SHIM from pip._internal.utils.temp_dir import TempDirectory from pip._internal.utils.ui import open_spinner from pip._internal.vcs import vcs -from pip._internal.wheel import Wheel, move_wheel_files +from pip._internal.wheel import move_wheel_files logger = logging.getLogger(__name__) -operators = specifiers.Specifier._operators.keys() - - -def _strip_extras(path): - m = re.match(r'^(.+)(\[[^\]]+\])$', path) - extras = None - if m: - path_no_extras = m.group(1) - extras = m.group(2) - else: - path_no_extras = path - - return path_no_extras, extras - class InstallRequirement(object): """ @@ -87,7 +67,6 @@ class InstallRequirement(object): if link is not None: self.link = self.original_link = link else: - from pip._internal.index import Link self.link = self.original_link = req and req.url and Link(req.url) if extras: @@ -128,155 +107,32 @@ class InstallRequirement(object): self.isolated = isolated self.build_env = NoOpBuildEnvironment() - # Constructors - # TODO: Move these out of this class into custom methods. - @classmethod - def from_editable(cls, editable_req, comes_from=None, isolated=False, - options=None, wheel_cache=None, constraint=False): - from pip._internal.index import Link + # The static build requirements (from pyproject.toml) + self.pyproject_requires = None - name, url, extras_override = parse_editable(editable_req) - if url.startswith('file:'): - source_dir = url_to_path(url) - else: - source_dir = None + # Build requirements that we will check are available + # TODO: We don't do this for --no-build-isolation. Should we? 
+ self.requirements_to_check = [] - if name is not None: - try: - req = Requirement(name) - except InvalidRequirement: - raise InstallationError("Invalid requirement: '%s'" % name) - else: - req = None - return cls( - req, comes_from, source_dir=source_dir, - editable=True, - link=Link(url), - constraint=constraint, - isolated=isolated, - options=options if options else {}, - wheel_cache=wheel_cache, - extras=extras_override or (), - ) + # The PEP 517 backend we should use to build the project + self.pep517_backend = None - @classmethod - def from_req(cls, req, comes_from=None, isolated=False, wheel_cache=None): - try: - req = Requirement(req) - except InvalidRequirement: - raise InstallationError("Invalid requirement: '%s'" % req) - if req.url: - raise InstallationError( - "Direct url requirement (like %s) are not allowed for " - "dependencies" % req - ) - return cls(req, comes_from, isolated=isolated, wheel_cache=wheel_cache) - - @classmethod - def from_line( - cls, name, comes_from=None, isolated=False, options=None, - wheel_cache=None, constraint=False): - """Creates an InstallRequirement from a name, which might be a - requirement, directory containing 'setup.py', filename, or URL. - """ - from pip._internal.index import Link - - if is_url(name): - marker_sep = '; ' - else: - marker_sep = ';' - if marker_sep in name: - name, markers = name.split(marker_sep, 1) - markers = markers.strip() - if not markers: - markers = None - else: - markers = Marker(markers) - else: - markers = None - name = name.strip() - req = None - path = os.path.normpath(os.path.abspath(name)) - link = None - extras = None - - if is_url(name): - link = Link(name) - else: - p, extras = _strip_extras(path) - looks_like_dir = os.path.isdir(p) and ( - os.path.sep in name or - (os.path.altsep is not None and os.path.altsep in name) or - name.startswith('.') - ) - if looks_like_dir: - if not is_installable_dir(p): - raise InstallationError( - "Directory %r is not installable. File 'setup.py' " - "not found." % name - ) - link = Link(path_to_url(p)) - elif is_archive_file(p): - if not os.path.isfile(p): - logger.warning( - 'Requirement %r looks like a filename, but the ' - 'file does not exist', - name - ) - link = Link(path_to_url(p)) - - # it's a local file, dir, or url - if link: - # Handle relative file URLs - if link.scheme == 'file' and re.search(r'\.\./', link.url): - link = Link( - path_to_url(os.path.normpath(os.path.abspath(link.path)))) - # wheel file - if link.is_wheel: - wheel = Wheel(link.filename) # can raise InvalidWheelFilename - req = "%s==%s" % (wheel.name, wheel.version) - else: - # set the req to the egg fragment. when it's not there, this - # will become an 'unnamed' requirement - req = link.egg_fragment - - # a requirement specifier - else: - req = name - - if extras: - extras = Requirement("placeholder" + extras.lower()).extras - else: - extras = () - if req is not None: - try: - req = Requirement(req) - except InvalidRequirement: - if os.path.sep in req: - add_msg = "It looks like a path." - add_msg += deduce_helpful_msg(req) - elif '=' in req and not any(op in req for op in operators): - add_msg = "= is not a valid operator. Did you mean == ?" 
- else: - add_msg = traceback.format_exc() - raise InstallationError( - "Invalid requirement: '%s'\n%s" % (req, add_msg)) - return cls( - req, comes_from, link=link, markers=markers, - isolated=isolated, - options=options if options else {}, - wheel_cache=wheel_cache, - constraint=constraint, - extras=extras, - ) + # Are we using PEP 517 for this requirement? + # After pyproject.toml has been loaded, the only valid values are True + # and False. Before loading, None is valid (meaning "use the default"). + # Setting an explicit value before loading pyproject.toml is supported, + # but after loading this flag should be treated as read only. + self.use_pep517 = None def __str__(self): if self.req: s = str(self.req) if self.link: s += ' from %s' % self.link.url + elif self.link: + s = self.link.url else: - s = self.link.url if self.link else None + s = '' if self.satisfied_by is not None: s += ' in %s' % display_path(self.satisfied_by.location) if self.comes_from: @@ -429,7 +285,7 @@ class InstallRequirement(object): package is not available until we run egg_info, so the build_location will return a temporary directory and store the _ideal_build_dir. - This is only called by self.egg_info_path to fix the temporary build + This is only called by self.run_egg_info to fix the temporary build directory. """ if self.source_dir is not None: @@ -557,48 +413,29 @@ class InstallRequirement(object): return pp_toml - def get_pep_518_info(self): - """Get PEP 518 build-time requirements. + def load_pyproject_toml(self): + """Load the pyproject.toml file. - Returns the list of the packages required to build the project, - specified as per PEP 518 within the package. If `pyproject.toml` is not - present, returns None to signify not using the same. + After calling this routine, all of the attributes related to PEP 517 + processing for this requirement have been set. In particular, the + use_pep517 attribute can be used to determine whether we should + follow the PEP 517 or legacy (setup.py) code path. """ - # If pyproject.toml does not exist, don't do anything. - if not os.path.isfile(self.pyproject_toml): - return None - - error_template = ( - "{package} has a pyproject.toml file that does not comply " - "with PEP 518: {reason}" + pep517_data = load_pyproject_toml( + self.use_pep517, + self.pyproject_toml, + self.setup_py, + str(self) ) - with io.open(self.pyproject_toml, encoding="utf-8") as f: - pp_toml = pytoml.load(f) - - # If there is no build-system table, just use setuptools and wheel. 
- if "build-system" not in pp_toml: - return ["setuptools", "wheel"] - - # Specifying the build-system table but not the requires key is invalid - build_system = pp_toml["build-system"] - if "requires" not in build_system: - raise InstallationError( - error_template.format(package=self, reason=( - "it has a 'build-system' table but not " - "'build-system.requires' which is mandatory in the table" - )) - ) - - # Error out if it's not a list of strings - requires = build_system["requires"] - if not _is_list_of_str(requires): - raise InstallationError(error_template.format( - package=self, - reason="'build-system.requires' is not a list of strings.", - )) - - return requires + if pep517_data is None: + self.use_pep517 = False + else: + self.use_pep517 = True + requires, backend, check = pep517_data + self.requirements_to_check = check + self.pyproject_requires = requires + self.pep517_backend = Pep517HookCaller(self.setup_py_dir, backend) def run_egg_info(self): assert self.source_dir @@ -636,20 +473,20 @@ class InstallRequirement(object): command_desc='python setup.py egg_info') if not self.req: - if isinstance(parse_version(self.pkg_info()["Version"]), Version): + if isinstance(parse_version(self.metadata["Version"]), Version): op = "==" else: op = "===" self.req = Requirement( "".join([ - self.pkg_info()["Name"], + self.metadata["Name"], op, - self.pkg_info()["Version"], + self.metadata["Version"], ]) ) self._correct_build_location() else: - metadata_name = canonicalize_name(self.pkg_info()["Name"]) + metadata_name = canonicalize_name(self.metadata["Name"]) if canonicalize_name(self.req.name) != metadata_name: logger.warning( 'Running setup.py (path:%s) egg_info for package %s ' @@ -659,19 +496,8 @@ class InstallRequirement(object): ) self.req = Requirement(metadata_name) - def egg_info_data(self, filename): - if self.satisfied_by is not None: - if not self.satisfied_by.has_metadata(filename): - return None - return self.satisfied_by.get_metadata(filename) - assert self.source_dir - filename = self.egg_info_path(filename) - if not os.path.exists(filename): - return None - data = read_text_file(filename) - return data - - def egg_info_path(self, filename): + @property + def egg_info_path(self): if self._egg_info_path is None: if self.editable: base = self.source_dir @@ -709,8 +535,7 @@ class InstallRequirement(object): if not filenames: raise InstallationError( - "Files/directories (from %s) not found in %s" - % (filename, base) + "Files/directories not found in %s" % base ) # if we have more than one match, we pick the toplevel one. 
This # can easily be the case if there is a dist folder which contains @@ -721,24 +546,18 @@ class InstallRequirement(object): (os.path.altsep and x.count(os.path.altsep) or 0) ) self._egg_info_path = os.path.join(base, filenames[0]) - return os.path.join(self._egg_info_path, filename) + return self._egg_info_path - def pkg_info(self): - p = FeedParser() - data = self.egg_info_data('PKG-INFO') - if not data: - logger.warning( - 'No PKG-INFO file found in %s', - display_path(self.egg_info_path('PKG-INFO')), - ) - p.feed(data or '') - return p.close() + @property + def metadata(self): + if not hasattr(self, '_metadata'): + self._metadata = get_metadata(self.get_dist()) - _requirements_section_re = re.compile(r'\[(.*?)\]') + return self._metadata def get_dist(self): """Return a pkg_resources.Distribution built from self.egg_info_path""" - egg_info = self.egg_info_path('').rstrip(os.path.sep) + egg_info = self.egg_info_path.rstrip(os.path.sep) base_dir = os.path.dirname(egg_info) metadata = pkg_resources.PathMetadata(base_dir, egg_info) dist_name = os.path.splitext(os.path.basename(egg_info))[0] @@ -750,7 +569,7 @@ class InstallRequirement(object): def assert_source_matches_version(self): assert self.source_dir - version = self.pkg_info()['version'] + version = self.metadata['version'] if self.req.specifier and version not in self.req.specifier: logger.warning( 'Requested %s, but installing version %s', @@ -877,7 +696,7 @@ class InstallRequirement(object): def archive(self, build_dir): assert self.source_dir create_archive = True - archive_name = '%s-%s.zip' % (self.name, self.pkg_info()["version"]) + archive_name = '%s-%s.zip' % (self.name, self.metadata["version"]) archive_path = os.path.join(build_dir, archive_name) if os.path.exists(archive_path): response = ask_path_exists( @@ -1039,104 +858,3 @@ class InstallRequirement(object): py_ver_str, self.name)] return install_args - - -def parse_editable(editable_req): - """Parses an editable requirement into: - - a requirement name - - an URL - - extras - - editable options - Accepted requirements: - svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir - .[some_extra] - """ - - from pip._internal.index import Link - - url = editable_req - - # If a file path is specified with extras, strip off the extras. - url_no_extras, extras = _strip_extras(url) - - if os.path.isdir(url_no_extras): - if not os.path.exists(os.path.join(url_no_extras, 'setup.py')): - raise InstallationError( - "Directory %r is not installable. File 'setup.py' not found." 
% - url_no_extras - ) - # Treating it as code that has already been checked out - url_no_extras = path_to_url(url_no_extras) - - if url_no_extras.lower().startswith('file:'): - package_name = Link(url_no_extras).egg_fragment - if extras: - return ( - package_name, - url_no_extras, - Requirement("placeholder" + extras.lower()).extras, - ) - else: - return package_name, url_no_extras, None - - for version_control in vcs: - if url.lower().startswith('%s:' % version_control): - url = '%s+%s' % (version_control, url) - break - - if '+' not in url: - raise InstallationError( - '%s should either be a path to a local project or a VCS url ' - 'beginning with svn+, git+, hg+, or bzr+' % - editable_req - ) - - vc_type = url.split('+', 1)[0].lower() - - if not vcs.get_backend(vc_type): - error_message = 'For --editable=%s only ' % editable_req + \ - ', '.join([backend.name + '+URL' for backend in vcs.backends]) + \ - ' is currently supported' - raise InstallationError(error_message) - - package_name = Link(url).egg_fragment - if not package_name: - raise InstallationError( - "Could not detect requirement name for '%s', please specify one " - "with #egg=your_package_name" % editable_req - ) - return package_name, url, None - - -def deduce_helpful_msg(req): - """Returns helpful msg in case requirements file does not exist, - or cannot be parsed. - - :params req: Requirements file path - """ - msg = "" - if os.path.exists(req): - msg = " It does exist." - # Try to parse and check if it is a requirements file. - try: - with open(req, 'r') as fp: - # parse first line only - next(parse_requirements(fp.read())) - msg += " The argument you provided " + \ - "(%s) appears to be a" % (req) + \ - " requirements file. If that is the" + \ - " case, use the '-r' flag to install" + \ - " the packages specified within it." - except RequirementParseError: - logger.debug("Cannot parse '%s' as requirements \ - file" % (req), exc_info=1) - else: - msg += " File '%s' does not exist." % (req) - return msg - - -def _is_list_of_str(obj): - return ( - isinstance(obj, list) and - all(isinstance(item, six.string_types) for item in obj) - ) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/req/req_set.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/req/req_set.py index 2bc6b74..b198317 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/req/req_set.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/req/req_set.py @@ -12,12 +12,13 @@ logger = logging.getLogger(__name__) class RequirementSet(object): - def __init__(self, require_hashes=False): + def __init__(self, require_hashes=False, check_supported_wheels=True): """Create a RequirementSet. """ self.requirements = OrderedDict() self.require_hashes = require_hashes + self.check_supported_wheels = check_supported_wheels # Mapping of alias: real_name self.requirement_aliases = {} @@ -55,17 +56,22 @@ class RequirementSet(object): requirement is applicable and has just been added. """ name = install_req.name + + # If the markers do not match, ignore this requirement. if not install_req.match_markers(extras_requested): - logger.info("Ignoring %s: markers '%s' don't match your " - "environment", install_req.name, - install_req.markers) + logger.info( + "Ignoring %s: markers '%s' don't match your environment", + name, install_req.markers, + ) return [], None - # This check has to come after we filter requirements with the - # environment markers. + # If the wheel is not supported, raise an error. 
+ # Should check this after filtering out based on environment markers to + # allow specifying different wheels based on the environment/OS, in a + # single requirements file. if install_req.link and install_req.link.is_wheel: wheel = Wheel(install_req.link.filename) - if not wheel.supported(): + if self.check_supported_wheels and not wheel.supported(): raise InstallationError( "%s is not a supported wheel on this platform." % wheel.filename @@ -77,59 +83,73 @@ class RequirementSet(object): "a non direct req should have a parent" ) + # Unnamed requirements are scanned again and the requirement won't be + # added as a dependency until after scanning. if not name: # url or path requirement w/o an egg fragment self.unnamed_requirements.append(install_req) return [install_req], None - else: - try: - existing_req = self.get_requirement(name) - except KeyError: - existing_req = None - if (parent_req_name is None and existing_req and not - existing_req.constraint and - existing_req.extras == install_req.extras and not - existing_req.req.specifier == install_req.req.specifier): - raise InstallationError( - 'Double requirement given: %s (already in %s, name=%r)' - % (install_req, existing_req, name)) - if not existing_req: - # Add requirement - self.requirements[name] = install_req - # FIXME: what about other normalizations? E.g., _ vs. -? - if name.lower() != name: - self.requirement_aliases[name.lower()] = name - result = [install_req] - else: - # Assume there's no need to scan, and that we've already - # encountered this for scanning. - result = [] - if not install_req.constraint and existing_req.constraint: - if (install_req.link and not (existing_req.link and - install_req.link.path == existing_req.link.path)): - self.reqs_to_cleanup.append(install_req) - raise InstallationError( - "Could not satisfy constraints for '%s': " - "installation from path or url cannot be " - "constrained to a version" % name, - ) - # If we're now installing a constraint, mark the existing - # object for real installation. - existing_req.constraint = False - existing_req.extras = tuple( - sorted(set(existing_req.extras).union( - set(install_req.extras)))) - logger.debug("Setting %s extras to: %s", - existing_req, existing_req.extras) - # And now we need to scan this. - result = [existing_req] - # Canonicalise to the already-added object for the backref - # check below. - install_req = existing_req - # We return install_req here to allow for the caller to add it to - # the dependency information for the parent package. - return result, install_req + try: + existing_req = self.get_requirement(name) + except KeyError: + existing_req = None + + has_conflicting_requirement = ( + parent_req_name is None and + existing_req and + not existing_req.constraint and + existing_req.extras == install_req.extras and + existing_req.req.specifier != install_req.req.specifier + ) + if has_conflicting_requirement: + raise InstallationError( + "Double requirement given: %s (already in %s, name=%r)" + % (install_req, existing_req, name) + ) + + # When no existing requirement exists, add the requirement as a + # dependency and it will be scanned again after. + if not existing_req: + self.requirements[name] = install_req + # FIXME: what about other normalizations? E.g., _ vs. -? + if name.lower() != name: + self.requirement_aliases[name.lower()] = name + # We'd want to rescan this requirements later + return [install_req], install_req + + # Assume there's no need to scan, and that we've already + # encountered this for scanning. 
+ if install_req.constraint or not existing_req.constraint: + return [], existing_req + + does_not_satisfy_constraint = ( + install_req.link and + not ( + existing_req.link and + install_req.link.path == existing_req.link.path + ) + ) + if does_not_satisfy_constraint: + self.reqs_to_cleanup.append(install_req) + raise InstallationError( + "Could not satisfy constraints for '%s': " + "installation from path or url cannot be " + "constrained to a version" % name, + ) + # If we're now installing a constraint, mark the existing + # object for real installation. + existing_req.constraint = False + existing_req.extras = tuple(sorted( + set(existing_req.extras) | set(install_req.extras) + )) + logger.debug( + "Setting %s extras to: %s", + existing_req, existing_req.extras, + ) + # Return the existing requirement for addition to the parent and + # scanning again. + return [existing_req], existing_req def has_requirement(self, project_name): name = project_name.lower() diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/req/req_uninstall.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/req/req_uninstall.py index f7ec3a8..a7d8230 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/req/req_uninstall.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/req/req_uninstall.py @@ -9,9 +9,9 @@ import sysconfig from pip._vendor import pkg_resources -from pip._internal.compat import WINDOWS, cache_from_source, uses_pycache from pip._internal.exceptions import UninstallationError from pip._internal.locations import bin_py, bin_user +from pip._internal.utils.compat import WINDOWS, cache_from_source, uses_pycache from pip._internal.utils.logging import indent_log from pip._internal.utils.misc import ( FakeFile, ask, dist_in_usersite, dist_is_local, egg_link_path, is_local, @@ -120,6 +120,8 @@ def compress_for_output_listing(paths): folders.add(os.path.dirname(path)) files.add(path) + _normcased_files = set(map(os.path.normcase, files)) + folders = compact(folders) # This walks the tree using os.walk to not miss extra folders @@ -130,8 +132,9 @@ def compress_for_output_listing(paths): if fname.endswith(".pyc"): continue - file_ = os.path.normcase(os.path.join(dirpath, fname)) - if os.path.isfile(file_) and file_ not in files: + file_ = os.path.join(dirpath, fname) + if (os.path.isfile(file_) and + os.path.normcase(file_) not in _normcased_files): # We are skipping this file. Add it to the set. 
will_skip.add(file_) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/resolve.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/resolve.py index 8480e48..2d9f1c5 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/resolve.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/resolve.py @@ -18,7 +18,7 @@ from pip._internal.exceptions import ( BestVersionAlreadyInstalled, DistributionNotFound, HashError, HashErrors, UnsupportedPythonVersion, ) -from pip._internal.req.req_install import InstallRequirement +from pip._internal.req.constructors import install_req_from_req from pip._internal.utils.logging import indent_log from pip._internal.utils.misc import dist_in_usersite, ensure_dir from pip._internal.utils.packaging import check_dist_requires_python @@ -268,7 +268,7 @@ class Resolver(object): more_reqs = [] def add_req(subreq, extras_requested): - sub_install_req = InstallRequirement.from_req( + sub_install_req = install_req_from_req( str(subreq), req_to_install, isolated=self.isolated, diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/appdirs.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/appdirs.py index 28c5d4b..cc96f98 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/appdirs.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/appdirs.py @@ -9,7 +9,7 @@ import sys from pip._vendor.six import PY2, text_type -from pip._internal.compat import WINDOWS, expanduser +from pip._internal.utils.compat import WINDOWS, expanduser def user_cache_dir(appname): diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/compat.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/compat.py similarity index 96% rename from thesisenv/lib/python3.6/site-packages/pip/_internal/compat.py rename to thesisenv/lib/python3.6/site-packages/pip/_internal/utils/compat.py index e6c008d..3114f2d 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/compat.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/compat.py @@ -25,6 +25,7 @@ except ImportError: __all__ = [ "ipaddress", "uses_pycache", "console_to_str", "native_str", "get_path_uid", "stdlib_pkgs", "WINDOWS", "samefile", "get_terminal_size", + "get_extension_suffixes", ] @@ -160,6 +161,18 @@ def get_path_uid(path): return file_uid +if sys.version_info >= (3, 4): + from importlib.machinery import EXTENSION_SUFFIXES + + def get_extension_suffixes(): + return EXTENSION_SUFFIXES +else: + from imp import get_suffixes + + def get_extension_suffixes(): + return [suffix[0] for suffix in get_suffixes()] + + def expanduser(path): """ Expand ~ and ~user constructions. 
diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/filesystem.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/filesystem.py index ee45501..1e9cebd 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/filesystem.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/filesystem.py @@ -1,7 +1,7 @@ import os import os.path -from pip._internal.compat import get_path_uid +from pip._internal.utils.compat import get_path_uid def check_path_owner(path): diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/logging.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/logging.py index 66c1d39..d9b9541 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/logging.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/logging.py @@ -5,7 +5,7 @@ import logging import logging.handlers import os -from pip._internal.compat import WINDOWS +from pip._internal.utils.compat import WINDOWS from pip._internal.utils.misc import ensure_dir try: diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/misc.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/misc.py index 3236af6..84a421f 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/misc.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/misc.py @@ -26,14 +26,14 @@ from pip._vendor.six import PY2 from pip._vendor.six.moves import input from pip._vendor.six.moves.urllib import parse as urllib_parse -from pip._internal.compat import ( - WINDOWS, console_to_str, expanduser, stdlib_pkgs, -) from pip._internal.exceptions import CommandError, InstallationError from pip._internal.locations import ( running_under_virtualenv, site_packages, user_site, virtualenv_no_global, write_delete_marker_file, ) +from pip._internal.utils.compat import ( + WINDOWS, console_to_str, expanduser, stdlib_pkgs, +) if PY2: from io import BytesIO as StringIO @@ -187,12 +187,16 @@ def format_size(bytes): def is_installable_dir(path): - """Return True if `path` is a directory containing a setup.py file.""" + """Is path is a directory containing setup.py or pyproject.toml? + """ if not os.path.isdir(path): return False setup_py = os.path.join(path, 'setup.py') if os.path.isfile(setup_py): return True + pyproject_toml = os.path.join(path, 'pyproject.toml') + if os.path.isfile(pyproject_toml): + return True return False @@ -852,6 +856,44 @@ def enum(*sequential, **named): return type('Enum', (), enums) +def make_vcs_requirement_url(repo_url, rev, egg_project_name, subdir=None): + """ + Return the URL for a VCS requirement. + + Args: + repo_url: the remote VCS url, with any needed VCS prefix (e.g. "git+"). + """ + req = '{}@{}#egg={}'.format(repo_url, rev, egg_project_name) + if subdir: + req += '&subdirectory={}'.format(subdir) + + return req + + +def split_auth_from_netloc(netloc): + """ + Parse out and remove the auth information from a netloc. + + Returns: (netloc, (username, password)). + """ + if '@' not in netloc: + return netloc, (None, None) + + # Split from the right because that's how urllib.parse.urlsplit() + # behaves if more than one @ is present (which can be checked using + # the password attribute of urlsplit()'s return value). 
+ auth, netloc = netloc.rsplit('@', 1) + if ':' in auth: + # Split from the left because that's how urllib.parse.urlsplit() + # behaves if more than one : is present (which again can be checked + # using the password attribute of the return value) + user_pass = tuple(auth.split(':', 1)) + else: + user_pass = auth, None + + return netloc, user_pass + + def remove_auth_from_url(url): # Return a copy of url with 'username:password@' removed. # username/pass params are passed to subversion through flags @@ -859,12 +901,11 @@ def remove_auth_from_url(url): # parsed url purl = urllib_parse.urlsplit(url) - stripped_netloc = \ - purl.netloc.split('@')[-1] + netloc, user_pass = split_auth_from_netloc(purl.netloc) # stripped url url_pieces = ( - purl.scheme, stripped_netloc, purl.path, purl.query, purl.fragment + purl.scheme, netloc, purl.path, purl.query, purl.fragment ) surl = urllib_parse.urlunsplit(url_pieces) return surl diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/models.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/models.py new file mode 100644 index 0000000..d5cb80a --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/models.py @@ -0,0 +1,40 @@ +"""Utilities for defining models +""" + +import operator + + +class KeyBasedCompareMixin(object): + """Provides comparision capabilities that is based on a key + """ + + def __init__(self, key, defining_class): + self._compare_key = key + self._defining_class = defining_class + + def __hash__(self): + return hash(self._compare_key) + + def __lt__(self, other): + return self._compare(other, operator.__lt__) + + def __le__(self, other): + return self._compare(other, operator.__le__) + + def __gt__(self, other): + return self._compare(other, operator.__gt__) + + def __ge__(self, other): + return self._compare(other, operator.__ge__) + + def __eq__(self, other): + return self._compare(other, operator.__eq__) + + def __ne__(self, other): + return self._compare(other, operator.__ne__) + + def _compare(self, other, method): + if not isinstance(other, self._defining_class): + return NotImplemented + + return method(self._compare_key, other._compare_key) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/outdated.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/outdated.py index 2b3fa95..5bfbfe1 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/outdated.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/outdated.py @@ -9,8 +9,8 @@ import sys from pip._vendor import lockfile, pkg_resources from pip._vendor.packaging import version as packaging_version -from pip._internal.compat import WINDOWS from pip._internal.index import PackageFinder +from pip._internal.utils.compat import WINDOWS from pip._internal.utils.filesystem import check_path_owner from pip._internal.utils.misc import ensure_dir, get_installed_version @@ -22,16 +22,25 @@ logger = logging.getLogger(__name__) class SelfCheckState(object): def __init__(self, cache_dir): - self.statefile_path = os.path.join(cache_dir, "selfcheck.json") + self.state = {} + self.statefile_path = None - # Load the existing state - try: - with open(self.statefile_path) as statefile: - self.state = json.load(statefile)[sys.prefix] - except (IOError, ValueError, KeyError): - self.state = {} + # Try to load the existing state + if cache_dir: + self.statefile_path = os.path.join(cache_dir, "selfcheck.json") + try: + with open(self.statefile_path) as statefile: + self.state = 
json.load(statefile)[sys.prefix] + except (IOError, ValueError, KeyError): + # Explicitly suppressing exceptions, since we don't want to + # error out if the cache file is invalid. + pass def save(self, pypi_version, current_time): + # If we do not have a path to cache in, don't bother saving. + if not self.statefile_path: + return + # Check to make sure that we own the directory if not check_path_owner(os.path.dirname(self.statefile_path)): return diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/packaging.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/packaging.py index 5f9bb93..c43142f 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/packaging.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/packaging.py @@ -8,6 +8,7 @@ from pip._vendor import pkg_resources from pip._vendor.packaging import specifiers, version from pip._internal import exceptions +from pip._internal.utils.misc import display_path logger = logging.getLogger(__name__) @@ -35,16 +36,20 @@ def check_requires_python(requires_python): def get_metadata(dist): if (isinstance(dist, pkg_resources.DistInfoDistribution) and dist.has_metadata('METADATA')): - return dist.get_metadata('METADATA') + metadata = dist.get_metadata('METADATA') elif dist.has_metadata('PKG-INFO'): - return dist.get_metadata('PKG-INFO') + metadata = dist.get_metadata('PKG-INFO') + else: + logger.warning("No metadata found in %s", display_path(dist.location)) + metadata = '' + + feed_parser = FeedParser() + feed_parser.feed(metadata) + return feed_parser.close() def check_dist_requires_python(dist): - metadata = get_metadata(dist) - feed_parser = FeedParser() - feed_parser.feed(metadata) - pkg_info_dict = feed_parser.close() + pkg_info_dict = get_metadata(dist) requires_python = pkg_info_dict.get('Requires-Python') try: if not check_requires_python(requires_python): diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/ui.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/ui.py index 9429aae..6bab904 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/ui.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/utils/ui.py @@ -15,7 +15,7 @@ from pip._vendor.progress.bar import ( from pip._vendor.progress.helpers import HIDE_CURSOR, SHOW_CURSOR, WritelnMixin from pip._vendor.progress.spinner import Spinner -from pip._internal.compat import WINDOWS +from pip._internal.utils.compat import WINDOWS from pip._internal.utils.logging import get_indentation from pip._internal.utils.misc import format_size from pip._internal.utils.typing import MYPY_CHECK_RUNNING diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/vcs/__init__.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/vcs/__init__.py index e8b4deb..794b35d 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/vcs/__init__.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/vcs/__init__.py @@ -17,7 +17,7 @@ from pip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: from typing import Dict, Optional, Tuple # noqa: F401 - from pip._internal.basecommand import Command # noqa: F401 + from pip._internal.cli.base_command import Command # noqa: F401 __all__ = ['vcs', 'get_src_requirement'] @@ -200,12 +200,6 @@ class VersionControl(object): drive, tail = os.path.splitdrive(repo) return repo.startswith(os.path.sep) or drive - # See issue #1083 for why this method was introduced: - # https://github.com/pypa/pip/issues/1083 - def 
translate_egg_surname(self, surname): - # For example, Django has branches of the form "stable/1.7.x". - return surname.replace('/', '_') - def export(self, location): """ Export the repository at the url to the destination location @@ -213,51 +207,65 @@ class VersionControl(object): """ raise NotImplementedError - def get_url_rev(self, url): + def get_netloc_and_auth(self, netloc, scheme): """ - Returns the correct repository URL and revision by parsing the given - repository URL + Parse the repository URL's netloc, and return the new netloc to use + along with auth information. + + Args: + netloc: the original repository URL netloc. + scheme: the repository URL's scheme without the vcs prefix. + + This is mainly for the Subversion class to override, so that auth + information can be provided via the --username and --password options + instead of through the URL. For other subclasses like Git without + such an option, auth information must stay in the URL. + + Returns: (netloc, (username, password)). + """ + return netloc, (None, None) + + def get_url_rev_and_auth(self, url): + """ + Parse the repository URL to use, and return the URL, revision, + and auth info to use. + + Returns: (url, rev, (username, password)). """ - error_message = ( - "Sorry, '%s' is a malformed VCS url. " - "The format is +://, " - "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp" - ) - assert '+' in url, error_message % url - url = url.split('+', 1)[1] scheme, netloc, path, query, frag = urllib_parse.urlsplit(url) + if '+' not in scheme: + raise ValueError( + "Sorry, {!r} is a malformed VCS url. " + "The format is +://, " + "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp".format(url) + ) + # Remove the vcs prefix. + scheme = scheme.split('+', 1)[1] + netloc, user_pass = self.get_netloc_and_auth(netloc, scheme) rev = None if '@' in path: path, rev = path.rsplit('@', 1) url = urllib_parse.urlunsplit((scheme, netloc, path, query, '')) - return url, rev + return url, rev, user_pass - def get_url_rev_args(self, url): + def make_rev_args(self, username, password): """ - Return the URL and RevOptions "extra arguments" to use in obtain(), - as a tuple (url, extra_args). + Return the RevOptions "extra arguments" to use in obtain(). """ - return url, [] + return [] def get_url_rev_options(self, url): """ Return the URL and RevOptions object to use in obtain() and in some cases export(), as a tuple (url, rev_options). """ - url, rev = self.get_url_rev(url) - url, extra_args = self.get_url_rev_args(url) + url, rev, user_pass = self.get_url_rev_and_auth(url) + username, password = user_pass + extra_args = self.make_rev_args(username, password) rev_options = self.make_rev_options(rev, extra_args=extra_args) return url, rev_options - def get_info(self, location): - """ - Returns (url, revision), where both are strings - """ - assert not location.rstrip('/').endswith(self.dirname), \ - 'Bad directory: %s' % location - return self.get_url(location), self.get_revision(location) - def normalize_url(self, url): """ Normalize a URL for comparison by unquoting it and removing any @@ -291,7 +299,7 @@ class VersionControl(object): """ raise NotImplementedError - def update(self, dest, rev_options): + def update(self, dest, url, rev_options): """ Update an already-existing repo to the given ``rev_options``. 
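A standalone sketch (parse_vcs_url is an invented name; the per-VCS auth handling of get_netloc_and_auth() is omitted) of the URL/revision splitting that the new get_url_rev_and_auth() performs in the hunk above:

    from urllib.parse import urlsplit, urlunsplit


    def parse_vcs_url(url):
        # 'git+https://host/repo.git@v1.0' -> ('https://host/repo.git', 'v1.0')
        scheme, netloc, path, query, frag = urlsplit(url)
        if '+' not in scheme:
            raise ValueError("%r is a malformed VCS url" % url)
        scheme = scheme.split('+', 1)[1]      # drop the 'git+'/'svn+' prefix
        rev = None
        if '@' in path:
            path, rev = path.rsplit('@', 1)   # a trailing @rev names a revision
        return urlunsplit((scheme, netloc, path, query, '')), rev


    print(parse_vcs_url('git+https://example.com/repo.git@v1.0#egg=repo'))
    # prints: ('https://example.com/repo.git', 'v1.0')
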
@@ -341,7 +349,7 @@ class VersionControl(object): self.repo_name, rev_display, ) - self.update(dest, rev_options) + self.update(dest, url, rev_options) else: logger.info('Skipping because already up-to-date.') return @@ -421,8 +429,6 @@ class VersionControl(object): def get_url(self, location): """ Return the url used at location - - This is used in get_info() and obtain(). """ raise NotImplementedError diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/vcs/bazaar.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/vcs/bazaar.py index 93b7616..3cc66c9 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/vcs/bazaar.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/vcs/bazaar.py @@ -6,7 +6,9 @@ import os from pip._vendor.six.moves.urllib import parse as urllib_parse from pip._internal.download import path_to_url -from pip._internal.utils.misc import display_path, rmtree +from pip._internal.utils.misc import ( + display_path, make_vcs_requirement_url, rmtree, +) from pip._internal.utils.temp_dir import TempDirectory from pip._internal.vcs import VersionControl, vcs @@ -62,16 +64,16 @@ class Bazaar(VersionControl): def switch(self, dest, url, rev_options): self.run_command(['switch', url], cwd=dest) - def update(self, dest, rev_options): + def update(self, dest, url, rev_options): cmd_args = ['pull', '-q'] + rev_options.to_args() self.run_command(cmd_args, cwd=dest) - def get_url_rev(self, url): + def get_url_rev_and_auth(self, url): # hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it - url, rev = super(Bazaar, self).get_url_rev(url) + url, rev, user_pass = super(Bazaar, self).get_url_rev_and_auth(url) if url.startswith('ssh://'): url = 'bzr+' + url - return url, rev + return url, rev, user_pass def get_url(self, location): urls = self.run_command(['info'], show_stdout=False, cwd=location) @@ -98,9 +100,9 @@ class Bazaar(VersionControl): return None if not repo.lower().startswith('bzr:'): repo = 'bzr+' + repo - egg_project_name = dist.egg_name().split('-', 1)[0] current_rev = self.get_revision(location) - return '%s@%s#egg=%s' % (repo, current_rev, egg_project_name) + egg_project_name = dist.egg_name().split('-', 1)[0] + return make_vcs_requirement_url(repo, current_rev, egg_project_name) def is_commit_id_equal(self, dest, name): """Always assume the versions don't match""" diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/vcs/git.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/vcs/git.py index 9ee2e01..9778539 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/vcs/git.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/vcs/git.py @@ -8,9 +8,9 @@ from pip._vendor.packaging.version import parse as parse_version from pip._vendor.six.moves.urllib import parse as urllib_parse from pip._vendor.six.moves.urllib import request as urllib_request -from pip._internal.compat import samefile from pip._internal.exceptions import BadCommand -from pip._internal.utils.misc import display_path +from pip._internal.utils.compat import samefile +from pip._internal.utils.misc import display_path, make_vcs_requirement_url from pip._internal.utils.temp_dir import TempDirectory from pip._internal.vcs import VersionControl, vcs @@ -77,6 +77,20 @@ class Git(VersionControl): version = '.'.join(version.split('.')[:3]) return parse_version(version) + def get_branch(self, location): + """ + Return the current branch, or None if HEAD isn't at a branch + (e.g. detached HEAD). 
+ """ + args = ['rev-parse', '--abbrev-ref', 'HEAD'] + output = self.run_command(args, show_stdout=False, cwd=location) + branch = output.strip() + + if branch == 'HEAD': + return None + + return branch + def export(self, location): """Export the Git repository at the url to the destination location""" if not location.endswith('/'): @@ -91,8 +105,8 @@ class Git(VersionControl): def get_revision_sha(self, dest, rev): """ - Return a commit hash for the given revision if it names a remote - branch or tag. Otherwise, return None. + Return (sha_or_none, is_branch), where sha_or_none is a commit hash + if the revision names a remote branch or tag, otherwise None. Args: dest: the repository directory. @@ -115,22 +129,30 @@ class Git(VersionControl): branch_ref = 'refs/remotes/origin/{}'.format(rev) tag_ref = 'refs/tags/{}'.format(rev) - return refs.get(branch_ref) or refs.get(tag_ref) + sha = refs.get(branch_ref) + if sha is not None: + return (sha, True) - def check_rev_options(self, dest, rev_options): - """Check the revision options before checkout. + sha = refs.get(tag_ref) - Returns a new RevOptions object for the SHA1 of the branch or tag - if found. + return (sha, False) + + def resolve_revision(self, dest, url, rev_options): + """ + Resolve a revision to a new RevOptions object with the SHA1 of the + branch, tag, or ref if found. Args: rev_options: a RevOptions object. """ rev = rev_options.arg_rev - sha = self.get_revision_sha(dest, rev) + sha, is_branch = self.get_revision_sha(dest, rev) if sha is not None: - return rev_options.make_new(sha) + rev_options = rev_options.make_new(sha) + rev_options.branch_name = rev if is_branch else None + + return rev_options # Do not show a warning for the common case of something that has # the form of a Git commit hash. @@ -139,6 +161,19 @@ class Git(VersionControl): "Did not find branch or tag '%s', assuming revision or ref.", rev, ) + + if not rev.startswith('refs/'): + return rev_options + + # If it looks like a ref, we have to fetch it explicitly. + self.run_command( + ['fetch', '-q', url] + rev_options.to_args(), + cwd=dest, + ) + # Change the revision to the SHA of the ref we fetched + sha = self.get_revision(dest, rev='FETCH_HEAD') + rev_options = rev_options.make_new(sha) + return rev_options def is_commit_id_equal(self, dest, name): @@ -164,20 +199,22 @@ class Git(VersionControl): if rev_options.rev: # Then a specific revision was requested. - rev_options = self.check_rev_options(dest, rev_options) - # Only do a checkout if the current commit id doesn't match - # the requested revision. - if not self.is_commit_id_equal(dest, rev_options.rev): - rev = rev_options.rev - # Only fetch the revision if it's a ref - if rev.startswith('refs/'): - self.run_command( - ['fetch', '-q', url] + rev_options.to_args(), - cwd=dest, - ) - # Change the revision to the SHA of the ref we fetched - rev = 'FETCH_HEAD' - self.run_command(['checkout', '-q', rev], cwd=dest) + rev_options = self.resolve_revision(dest, url, rev_options) + branch_name = getattr(rev_options, 'branch_name', None) + if branch_name is None: + # Only do a checkout if the current commit id doesn't match + # the requested revision. + if not self.is_commit_id_equal(dest, rev_options.rev): + cmd_args = ['checkout', '-q'] + rev_options.to_args() + self.run_command(cmd_args, cwd=dest) + elif self.get_branch(dest) != branch_name: + # Then a specific branch was requested, and that branch + # is not yet checked out. 
+ track_branch = 'origin/{}'.format(branch_name) + cmd_args = [ + 'checkout', '-b', branch_name, '--track', track_branch, + ] + self.run_command(cmd_args, cwd=dest) #: repo may contain submodules self.update_submodules(dest) @@ -189,7 +226,7 @@ class Git(VersionControl): self.update_submodules(dest) - def update(self, dest, rev_options): + def update(self, dest, url, rev_options): # First fetch changes from the default remote if self.get_git_version() >= parse_version('1.9.0'): # fetch tags in addition to everything else @@ -197,7 +234,7 @@ class Git(VersionControl): else: self.run_command(['fetch', '-q'], cwd=dest) # Then reset to wanted revision (maybe even origin/master) - rev_options = self.check_rev_options(dest, rev_options) + rev_options = self.resolve_revision(dest, url, rev_options) cmd_args = ['reset', '--hard', '-q'] + rev_options.to_args() self.run_command(cmd_args, cwd=dest) #: update submodules @@ -218,9 +255,11 @@ class Git(VersionControl): url = found_remote.split(' ')[1] return url.strip() - def get_revision(self, location): + def get_revision(self, location, rev=None): + if rev is None: + rev = 'HEAD' current_rev = self.run_command( - ['rev-parse', 'HEAD'], show_stdout=False, cwd=location, + ['rev-parse', rev], show_stdout=False, cwd=location, ) return current_rev.strip() @@ -255,17 +294,15 @@ class Git(VersionControl): repo = self.get_url(location) if not repo.lower().startswith('git:'): repo = 'git+' + repo - egg_project_name = dist.egg_name().split('-', 1)[0] - if not repo: - return None current_rev = self.get_revision(location) - req = '%s@%s#egg=%s' % (repo, current_rev, egg_project_name) - subdirectory = self._get_subdirectory(location) - if subdirectory: - req += '&subdirectory=' + subdirectory + egg_project_name = dist.egg_name().split('-', 1)[0] + subdir = self._get_subdirectory(location) + req = make_vcs_requirement_url(repo, current_rev, egg_project_name, + subdir=subdir) + return req - def get_url_rev(self, url): + def get_url_rev_and_auth(self, url): """ Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'. 
That's required because although they use SSH they sometimes don't @@ -275,12 +312,12 @@ class Git(VersionControl): if '://' not in url: assert 'file:' not in url url = url.replace('git+', 'git+ssh://') - url, rev = super(Git, self).get_url_rev(url) + url, rev, user_pass = super(Git, self).get_url_rev_and_auth(url) url = url.replace('ssh://', '') else: - url, rev = super(Git, self).get_url_rev(url) + url, rev, user_pass = super(Git, self).get_url_rev_and_auth(url) - return url, rev + return url, rev, user_pass def update_submodules(self, location): if not os.path.exists(os.path.join(location, '.gitmodules')): diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/vcs/mercurial.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/vcs/mercurial.py index 2d0750c..17cfb67 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/vcs/mercurial.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/vcs/mercurial.py @@ -6,7 +6,7 @@ import os from pip._vendor.six.moves import configparser from pip._internal.download import path_to_url -from pip._internal.utils.misc import display_path +from pip._internal.utils.misc import display_path, make_vcs_requirement_url from pip._internal.utils.temp_dir import TempDirectory from pip._internal.vcs import VersionControl, vcs @@ -59,7 +59,7 @@ class Mercurial(VersionControl): cmd_args = ['update', '-q'] + rev_options.to_args() self.run_command(cmd_args, cwd=dest) - def update(self, dest, rev_options): + def update(self, dest, url, rev_options): self.run_command(['pull', '-q'], cwd=dest) cmd_args = ['update', '-q'] + rev_options.to_args() self.run_command(cmd_args, cwd=dest) @@ -88,11 +88,10 @@ class Mercurial(VersionControl): repo = self.get_url(location) if not repo.lower().startswith('hg:'): repo = 'hg+' + repo - egg_project_name = dist.egg_name().split('-', 1)[0] - if not repo: - return None current_rev_hash = self.get_revision_hash(location) - return '%s@%s#egg=%s' % (repo, current_rev_hash, egg_project_name) + egg_project_name = dist.egg_name().split('-', 1)[0] + return make_vcs_requirement_url(repo, current_rev_hash, + egg_project_name) def is_commit_id_equal(self, dest, name): """Always assume the versions don't match""" diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/vcs/subversion.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/vcs/subversion.py index 7078775..6f7cb5d 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/vcs/subversion.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/vcs/subversion.py @@ -4,17 +4,15 @@ import logging import os import re -from pip._vendor.six.moves.urllib import parse as urllib_parse - -from pip._internal.index import Link +from pip._internal.models.link import Link from pip._internal.utils.logging import indent_log -from pip._internal.utils.misc import display_path, remove_auth_from_url, rmtree +from pip._internal.utils.misc import ( + display_path, make_vcs_requirement_url, rmtree, split_auth_from_netloc, +) from pip._internal.vcs import VersionControl, vcs _svn_xml_url_re = re.compile('url="([^"]+)"') _svn_rev_re = re.compile(r'committed-rev="(\d+)"') -_svn_url_re = re.compile(r'URL: (.+)') -_svn_revision_re = re.compile(r'Revision: (.+)') _svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"') _svn_info_xml_url_re = re.compile(r'(.*)') @@ -31,34 +29,6 @@ class Subversion(VersionControl): def get_base_rev_args(self, rev): return ['-r', rev] - def get_info(self, location): - """Returns (url, revision), where both are strings""" - assert not 
location.rstrip('/').endswith(self.dirname), \ - 'Bad directory: %s' % location - output = self.run_command( - ['info', location], - show_stdout=False, - extra_environ={'LANG': 'C'}, - ) - match = _svn_url_re.search(output) - if not match: - logger.warning( - 'Cannot determine URL of svn checkout %s', - display_path(location), - ) - logger.debug('Output that cannot be parsed: \n%s', output) - return None, None - url = match.group(1).strip() - match = _svn_revision_re.search(output) - if not match: - logger.warning( - 'Cannot determine revision of svn checkout %s', - display_path(location), - ) - logger.debug('Output that cannot be parsed: \n%s', output) - return url, None - return url, match.group(1) - def export(self, location): """Export the svn repository at the url to the destination location""" url, rev_options = self.get_url_rev_options(self.url) @@ -87,7 +57,7 @@ class Subversion(VersionControl): cmd_args = ['switch'] + rev_options.to_args() + [url, dest] self.run_command(cmd_args) - def update(self, dest, rev_options): + def update(self, dest, url, rev_options): cmd_args = ['update'] + rev_options.to_args() + [dest] self.run_command(cmd_args) @@ -132,18 +102,34 @@ class Subversion(VersionControl): revision = max(revision, localrev) return revision - def get_url_rev(self, url): + def get_netloc_and_auth(self, netloc, scheme): + """ + This override allows the auth information to be passed to svn via the + --username and --password options instead of via the URL. + """ + if scheme == 'ssh': + # The --username and --password options can't be used for + # svn+ssh URLs, so keep the auth information in the URL. + return super(Subversion, self).get_netloc_and_auth( + netloc, scheme) + + return split_auth_from_netloc(netloc) + + def get_url_rev_and_auth(self, url): # hotfix the URL scheme after removing svn+ from svn+ssh:// readd it - url, rev = super(Subversion, self).get_url_rev(url) + url, rev, user_pass = super(Subversion, self).get_url_rev_and_auth(url) if url.startswith('ssh://'): url = 'svn+' + url - return url, rev + return url, rev, user_pass - def get_url_rev_args(self, url): - extra_args = get_rev_options_args(url) - url = remove_auth_from_url(url) + def make_rev_args(self, username, password): + extra_args = [] + if username: + extra_args += ['--username', username] + if password: + extra_args += ['--password', password] - return url, extra_args + return extra_args def get_url(self, location): # In cases where the source is in a subdirectory, not alongside @@ -213,42 +199,15 @@ class Subversion(VersionControl): repo = self.get_url(location) if repo is None: return None + repo = 'svn+' + repo + rev = self.get_revision(location) # FIXME: why not project name? egg_project_name = dist.egg_name().split('-', 1)[0] - rev = self.get_revision(location) - return 'svn+%s@%s#egg=%s' % (repo, rev, egg_project_name) + return make_vcs_requirement_url(repo, rev, egg_project_name) def is_commit_id_equal(self, dest, name): """Always assume the versions don't match""" return False -def get_rev_options_args(url): - """ - Return the extra arguments to pass to RevOptions. 
- """ - r = urllib_parse.urlsplit(url) - if hasattr(r, 'username'): - # >= Python-2.5 - username, password = r.username, r.password - else: - netloc = r[1] - if '@' in netloc: - auth = netloc.split('@')[0] - if ':' in auth: - username, password = auth.split(':', 1) - else: - username, password = auth, None - else: - username, password = None, None - - extra_args = [] - if username: - extra_args += ['--username', username] - if password: - extra_args += ['--password', password] - - return extra_args - - vcs.register(Subversion) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_internal/wheel.py b/thesisenv/lib/python3.6/site-packages/pip/_internal/wheel.py index fcf9d3d..5ce890e 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_internal/wheel.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_internal/wheel.py @@ -475,7 +475,7 @@ if __name__ == '__main__': if warn_script_location: msg = message_about_scripts_not_on_PATH(generated_console_scripts) if msg is not None: - logger.warn(msg) + logger.warning(msg) if len(gui) > 0: generated.extend( @@ -500,16 +500,19 @@ if __name__ == '__main__': with open_for_csv(temp_record, 'w+') as record_out: reader = csv.reader(record_in) writer = csv.writer(record_out) + outrows = [] for row in reader: row[0] = installed.pop(row[0], row[0]) if row[0] in changed: row[1], row[2] = rehash(row[0]) - writer.writerow(row) + outrows.append(tuple(row)) for f in generated: digest, length = rehash(f) - writer.writerow((normpath(f, lib_dir), digest, length)) + outrows.append((normpath(f, lib_dir), digest, length)) for f in installed: - writer.writerow((installed[f], '', '')) + outrows.append((installed[f], '', '')) + for row in sorted(outrows): + writer.writerow(row) shutil.move(temp_record, record) @@ -710,6 +713,7 @@ class WheelBuilder(object): :return: True if all the wheels built correctly. """ from pip._internal import index + from pip._internal.models.link import Link building_is_possible = self._wheel_dir or ( autobuilding and self.wheel_cache.cache_dir @@ -717,6 +721,7 @@ class WheelBuilder(object): assert building_is_possible buildset = [] + format_control = self.finder.format_control for req in requirements: if req.constraint: continue @@ -740,8 +745,7 @@ class WheelBuilder(object): if index.egg_info_matches(base, None, link) is None: # E.g. local directory. Build wheel just for this run. ephem_cache = True - if "binary" not in index.fmt_ctl_formats( - self.finder.format_control, + if "binary" not in format_control.get_allowed_formats( canonicalize_name(req.name)): logger.info( "Skipping bdist_wheel for %s, due to binaries " @@ -802,7 +806,7 @@ class WheelBuilder(object): self.preparer.build_dir ) # Update the link for this. 
- req.link = index.Link(path_to_url(wheel_file)) + req.link = Link(path_to_url(wheel_file)) assert req.link.is_wheel # extract the wheel into the dir unpack_url( diff --git a/thesisenv/lib/python3.6/site-packages/pip/_vendor/certifi/__init__.py b/thesisenv/lib/python3.6/site-packages/pip/_vendor/certifi/__init__.py index 0c4963e..aa329fb 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_vendor/certifi/__init__.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_vendor/certifi/__init__.py @@ -1,3 +1,3 @@ from .core import where, old_where -__version__ = "2018.04.16" +__version__ = "2018.08.24" diff --git a/thesisenv/lib/python3.6/site-packages/pip/_vendor/certifi/__main__.py b/thesisenv/lib/python3.6/site-packages/pip/_vendor/certifi/__main__.py index 5f1da0d..ae2aff5 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_vendor/certifi/__main__.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_vendor/certifi/__main__.py @@ -1,2 +1,2 @@ -from certifi import where +from pip._vendor.certifi import where print(where()) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_vendor/certifi/cacert.pem b/thesisenv/lib/python3.6/site-packages/pip/_vendor/certifi/cacert.pem index 2713f54..85de024 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_vendor/certifi/cacert.pem +++ b/thesisenv/lib/python3.6/site-packages/pip/_vendor/certifi/cacert.pem @@ -3692,169 +3692,6 @@ lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR -----END CERTIFICATE----- -# Issuer: CN=Certplus Root CA G1 O=Certplus -# Subject: CN=Certplus Root CA G1 O=Certplus -# Label: "Certplus Root CA G1" -# Serial: 1491911565779898356709731176965615564637713 -# MD5 Fingerprint: 7f:09:9c:f7:d9:b9:5c:69:69:56:d5:37:3e:14:0d:42 -# SHA1 Fingerprint: 22:fd:d0:b7:fd:a2:4e:0d:ac:49:2c:a0:ac:a6:7b:6a:1f:e3:f7:66 -# SHA256 Fingerprint: 15:2a:40:2b:fc:df:2c:d5:48:05:4d:22:75:b3:9c:7f:ca:3e:c0:97:80:78:b0:f0:ea:76:e5:61:a6:c7:43:3e ------BEGIN CERTIFICATE----- -MIIFazCCA1OgAwIBAgISESBVg+QtPlRWhS2DN7cs3EYRMA0GCSqGSIb3DQEBDQUA -MD4xCzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2Vy -dHBsdXMgUm9vdCBDQSBHMTAeFw0xNDA1MjYwMDAwMDBaFw0zODAxMTUwMDAwMDBa -MD4xCzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2Vy -dHBsdXMgUm9vdCBDQSBHMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB -ANpQh7bauKk+nWT6VjOaVj0W5QOVsjQcmm1iBdTYj+eJZJ+622SLZOZ5KmHNr49a -iZFluVj8tANfkT8tEBXgfs+8/H9DZ6itXjYj2JizTfNDnjl8KvzsiNWI7nC9hRYt -6kuJPKNxQv4c/dMcLRC4hlTqQ7jbxofaqK6AJc96Jh2qkbBIb6613p7Y1/oA/caP -0FG7Yn2ksYyy/yARujVjBYZHYEMzkPZHogNPlk2dT8Hq6pyi/jQu3rfKG3akt62f -6ajUeD94/vI4CTYd0hYCyOwqaK/1jpTvLRN6HkJKHRUxrgwEV/xhc/MxVoYxgKDE -EW4wduOU8F8ExKyHcomYxZ3MVwia9Az8fXoFOvpHgDm2z4QTd28n6v+WZxcIbekN -1iNQMLAVdBM+5S//Ds3EC0pd8NgAM0lm66EYfFkuPSi5YXHLtaW6uOrc4nBvCGrc -h2c0798wct3zyT8j/zXhviEpIDCB5BmlIOklynMxdCm+4kLV87ImZsdo/Rmz5yCT -mehd4F6H50boJZwKKSTUzViGUkAksnsPmBIgJPaQbEfIDbsYIC7Z/fyL8inqh3SV -4EJQeIQEQWGw9CEjjy3LKCHyamz0GqbFFLQ3ZU+V/YDI+HLlJWvEYLF7bY5KinPO -WftwenMGE9nTdDckQQoRb5fc5+R+ob0V8rqHDz1oihYHAgMBAAGjYzBhMA4GA1Ud -DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSowcCbkahDFXxd -Bie0KlHYlwuBsTAfBgNVHSMEGDAWgBSowcCbkahDFXxdBie0KlHYlwuBsTANBgkq -hkiG9w0BAQ0FAAOCAgEAnFZvAX7RvUz1isbwJh/k4DgYzDLDKTudQSk0YcbX8ACh -66Ryj5QXvBMsdbRX7gp8CXrc1cqh0DQT+Hern+X+2B50ioUHj3/MeXrKls3N/U/7 -/SMNkPX0XtPGYX2eEeAC7gkE2Qfdpoq3DIMku4NQkv5gdRE+2J2winq14J2by5BS -S7CTKtQ+FjPlnsZlFT5kOwQ/2wyPX1wdaR+v8+khjPPvl/aatxm2hHSco1S1cE5j -2FddUyGbQJJD+tZ3VTNPZNX70Cxqjm0lpu+F6ALEUz65noe8zDUa3qHpimOHZR4R 
-Kttjd5cUvpoUmRGywO6wT/gUITJDT5+rosuoD6o7BlXGEilXCNQ314cnrUlZp5Gr -RHpejXDbl85IULFzk/bwg2D5zfHhMf1bfHEhYxQUqq/F3pN+aLHsIqKqkHWetUNy -6mSjhEv9DKgma3GX7lZjZuhCVPnHHd/Qj1vfyDBviP4NxDMcU6ij/UgQ8uQKTuEV -V/xuZDDCVRHc6qnNSlSsKWNEz0pAoNZoWRsz+e86i9sgktxChL8Bq4fA1SCC28a5 -g4VCXA9DO2pJNdWY9BW/+mGBDAkgGNLQFwzLSABQ6XaCjGTXOqAHVcweMcDvOrRl -++O/QmueD6i9a5jc2NvLi6Td11n0bt3+qsOR0C5CB8AMTVPNJLFMWx5R9N/pkvo= ------END CERTIFICATE----- - -# Issuer: CN=Certplus Root CA G2 O=Certplus -# Subject: CN=Certplus Root CA G2 O=Certplus -# Label: "Certplus Root CA G2" -# Serial: 1492087096131536844209563509228951875861589 -# MD5 Fingerprint: a7:ee:c4:78:2d:1b:ee:2d:b9:29:ce:d6:a7:96:32:31 -# SHA1 Fingerprint: 4f:65:8e:1f:e9:06:d8:28:02:e9:54:47:41:c9:54:25:5d:69:cc:1a -# SHA256 Fingerprint: 6c:c0:50:41:e6:44:5e:74:69:6c:4c:fb:c9:f8:0f:54:3b:7e:ab:bb:44:b4:ce:6f:78:7c:6a:99:71:c4:2f:17 ------BEGIN CERTIFICATE----- -MIICHDCCAaKgAwIBAgISESDZkc6uo+jF5//pAq/Pc7xVMAoGCCqGSM49BAMDMD4x -CzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBs -dXMgUm9vdCBDQSBHMjAeFw0xNDA1MjYwMDAwMDBaFw0zODAxMTUwMDAwMDBaMD4x -CzAJBgNVBAYTAkZSMREwDwYDVQQKDAhDZXJ0cGx1czEcMBoGA1UEAwwTQ2VydHBs -dXMgUm9vdCBDQSBHMjB2MBAGByqGSM49AgEGBSuBBAAiA2IABM0PW1aC3/BFGtat -93nwHcmsltaeTpwftEIRyoa/bfuFo8XlGVzX7qY/aWfYeOKmycTbLXku54uNAm8x -Ik0G42ByRZ0OQneezs/lf4WbGOT8zC5y0xaTTsqZY1yhBSpsBqNjMGEwDgYDVR0P -AQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNqDYwJ5jtpMxjwj -FNiPwyCrKGBZMB8GA1UdIwQYMBaAFNqDYwJ5jtpMxjwjFNiPwyCrKGBZMAoGCCqG -SM49BAMDA2gAMGUCMHD+sAvZ94OX7PNVHdTcswYO/jOYnYs5kGuUIe22113WTNch -p+e/IQ8rzfcq3IUHnQIxAIYUFuXcsGXCwI4Un78kFmjlvPl5adytRSv3tjFzzAal -U5ORGpOucGpnutee5WEaXw== ------END CERTIFICATE----- - -# Issuer: CN=OpenTrust Root CA G1 O=OpenTrust -# Subject: CN=OpenTrust Root CA G1 O=OpenTrust -# Label: "OpenTrust Root CA G1" -# Serial: 1492036577811947013770400127034825178844775 -# MD5 Fingerprint: 76:00:cc:81:29:cd:55:5e:88:6a:7a:2e:f7:4d:39:da -# SHA1 Fingerprint: 79:91:e8:34:f7:e2:ee:dd:08:95:01:52:e9:55:2d:14:e9:58:d5:7e -# SHA256 Fingerprint: 56:c7:71:28:d9:8c:18:d9:1b:4c:fd:ff:bc:25:ee:91:03:d4:75:8e:a2:ab:ad:82:6a:90:f3:45:7d:46:0e:b4 ------BEGIN CERTIFICATE----- -MIIFbzCCA1egAwIBAgISESCzkFU5fX82bWTCp59rY45nMA0GCSqGSIb3DQEBCwUA -MEAxCzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9w -ZW5UcnVzdCBSb290IENBIEcxMB4XDTE0MDUyNjA4NDU1MFoXDTM4MDExNTAwMDAw -MFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwU -T3BlblRydXN0IFJvb3QgQ0EgRzEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK -AoICAQD4eUbalsUwXopxAy1wpLuwxQjczeY1wICkES3d5oeuXT2R0odsN7faYp6b -wiTXj/HbpqbfRm9RpnHLPhsxZ2L3EVs0J9V5ToybWL0iEA1cJwzdMOWo010hOHQX -/uMftk87ay3bfWAfjH1MBcLrARYVmBSO0ZB3Ij/swjm4eTrwSSTilZHcYTSSjFR0 -77F9jAHiOH3BX2pfJLKOYheteSCtqx234LSWSE9mQxAGFiQD4eCcjsZGT44ameGP -uY4zbGneWK2gDqdkVBFpRGZPTBKnjix9xNRbxQA0MMHZmf4yzgeEtE7NCv82TWLx -p2NX5Ntqp66/K7nJ5rInieV+mhxNaMbBGN4zK1FGSxyO9z0M+Yo0FMT7MzUj8czx -Kselu7Cizv5Ta01BG2Yospb6p64KTrk5M0ScdMGTHPjgniQlQ/GbI4Kq3ywgsNw2 -TgOzfALU5nsaqocTvz6hdLubDuHAk5/XpGbKuxs74zD0M1mKB3IDVedzagMxbm+W -G+Oin6+Sx+31QrclTDsTBM8clq8cIqPQqwWyTBIjUtz9GVsnnB47ev1CI9sjgBPw -vFEVVJSmdz7QdFG9URQIOTfLHzSpMJ1ShC5VkLG631UAC9hWLbFJSXKAqWLXwPYY -EQRVzXR7z2FwefR7LFxckvzluFqrTJOVoSfupb7PcSNCupt2LQIDAQABo2MwYTAO -BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUl0YhVyE1 -2jZVx/PxN3DlCPaTKbYwHwYDVR0jBBgwFoAUl0YhVyE12jZVx/PxN3DlCPaTKbYw -DQYJKoZIhvcNAQELBQADggIBAB3dAmB84DWn5ph76kTOZ0BP8pNuZtQ5iSas000E -PLuHIT839HEl2ku6q5aCgZG27dmxpGWX4m9kWaSW7mDKHyP7Rbr/jyTwyqkxf3kf -gLMtMrpkZ2CvuVnN35pJ06iCsfmYlIrM4LvgBBuZYLFGZdwIorJGnkSI6pN+VxbS 
-FXJfLkur1J1juONI5f6ELlgKn0Md/rcYkoZDSw6cMoYsYPXpSOqV7XAp8dUv/TW0 -V8/bhUiZucJvbI/NeJWsZCj9VrDDb8O+WVLhX4SPgPL0DTatdrOjteFkdjpY3H1P -XlZs5VVZV6Xf8YpmMIzUUmI4d7S+KNfKNsSbBfD4Fdvb8e80nR14SohWZ25g/4/I -i+GOvUKpMwpZQhISKvqxnUOOBZuZ2mKtVzazHbYNeS2WuOvyDEsMpZTGMKcmGS3t -TAZQMPH9WD25SxdfGbRqhFS0OE85og2WaMMolP3tLR9Ka0OWLpABEPs4poEL0L91 -09S5zvE/bw4cHjdx5RiHdRk/ULlepEU0rbDK5uUTdg8xFKmOLZTW1YVNcxVPS/Ky -Pu1svf0OnWZzsD2097+o4BGkxK51CUpjAEggpsadCwmKtODmzj7HPiY46SvepghJ -AwSQiumPv+i2tCqjI40cHLI5kqiPAlxAOXXUc0ECd97N4EOH1uS6SsNsEn/+KuYj -1oxx ------END CERTIFICATE----- - -# Issuer: CN=OpenTrust Root CA G2 O=OpenTrust -# Subject: CN=OpenTrust Root CA G2 O=OpenTrust -# Label: "OpenTrust Root CA G2" -# Serial: 1492012448042702096986875987676935573415441 -# MD5 Fingerprint: 57:24:b6:59:24:6b:ae:c8:fe:1c:0c:20:f2:c0:4e:eb -# SHA1 Fingerprint: 79:5f:88:60:c5:ab:7c:3d:92:e6:cb:f4:8d:e1:45:cd:11:ef:60:0b -# SHA256 Fingerprint: 27:99:58:29:fe:6a:75:15:c1:bf:e8:48:f9:c4:76:1d:b1:6c:22:59:29:25:7b:f4:0d:08:94:f2:9e:a8:ba:f2 ------BEGIN CERTIFICATE----- -MIIFbzCCA1egAwIBAgISESChaRu/vbm9UpaPI+hIvyYRMA0GCSqGSIb3DQEBDQUA -MEAxCzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9w -ZW5UcnVzdCBSb290IENBIEcyMB4XDTE0MDUyNjAwMDAwMFoXDTM4MDExNTAwMDAw -MFowQDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwU -T3BlblRydXN0IFJvb3QgQ0EgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK -AoICAQDMtlelM5QQgTJT32F+D3Y5z1zCU3UdSXqWON2ic2rxb95eolq5cSG+Ntmh -/LzubKh8NBpxGuga2F8ORAbtp+Dz0mEL4DKiltE48MLaARf85KxP6O6JHnSrT78e -CbY2albz4e6WiWYkBuTNQjpK3eCasMSCRbP+yatcfD7J6xcvDH1urqWPyKwlCm/6 -1UWY0jUJ9gNDlP7ZvyCVeYCYitmJNbtRG6Q3ffyZO6v/v6wNj0OxmXsWEH4db0fE -FY8ElggGQgT4hNYdvJGmQr5J1WqIP7wtUdGejeBSzFfdNTVY27SPJIjki9/ca1TS -gSuyzpJLHB9G+h3Ykst2Z7UJmQnlrBcUVXDGPKBWCgOz3GIZ38i1MH/1PCZ1Eb3X -G7OHngevZXHloM8apwkQHZOJZlvoPGIytbU6bumFAYueQ4xncyhZW+vj3CzMpSZy -YhK05pyDRPZRpOLAeiRXyg6lPzq1O4vldu5w5pLeFlwoW5cZJ5L+epJUzpM5ChaH -vGOz9bGTXOBut9Dq+WIyiET7vycotjCVXRIouZW+j1MY5aIYFuJWpLIsEPUdN6b4 -t/bQWVyJ98LVtZR00dX+G7bw5tYee9I8y6jj9RjzIR9u701oBnstXW5DiabA+aC/ -gh7PU3+06yzbXfZqfUAkBXKJOAGTy3HCOV0GEfZvePg3DTmEJwIDAQABo2MwYTAO -BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUajn6QiL3 -5okATV59M4PLuG53hq8wHwYDVR0jBBgwFoAUajn6QiL35okATV59M4PLuG53hq8w -DQYJKoZIhvcNAQENBQADggIBAJjLq0A85TMCl38th6aP1F5Kr7ge57tx+4BkJamz -Gj5oXScmp7oq4fBXgwpkTx4idBvpkF/wrM//T2h6OKQQbA2xx6R3gBi2oihEdqc0 -nXGEL8pZ0keImUEiyTCYYW49qKgFbdEfwFFEVn8nNQLdXpgKQuswv42hm1GqO+qT -RmTFAHneIWv2V6CG1wZy7HBGS4tz3aAhdT7cHcCP009zHIXZ/n9iyJVvttN7jLpT -wm+bREx50B1ws9efAvSyB7DH5fitIw6mVskpEndI2S9G/Tvw/HRwkqWOOAgfZDC2 -t0v7NqwQjqBSM2OdAzVWxWm9xiNaJ5T2pBL4LTM8oValX9YZ6e18CL13zSdkzJTa -TkZQh+D5wVOAHrut+0dSixv9ovneDiK3PTNZbNTe9ZUGMg1RGUFcPk8G97krgCf2 -o6p6fAbhQ8MTOWIaNr3gKC6UAuQpLmBVrkA9sHSSXvAgZJY/X0VdiLWK2gKgW0VU -3jg9CcCoSmVGFvyqv1ROTVu+OEO3KMqLM6oaJbolXCkvW0pujOotnCr2BXbgd5eA -iN1nE28daCSLT7d0geX0YJ96Vdc+N9oWaz53rK4YcJUIeSkDiv7BO7M/Gg+kO14f -WKGVyasvc0rQLW6aWQ9VGHgtPFGml4vmu7JwqkwR3v98KzfUetF3NI/n+UL3PIEM -S1IK ------END CERTIFICATE----- - -# Issuer: CN=OpenTrust Root CA G3 O=OpenTrust -# Subject: CN=OpenTrust Root CA G3 O=OpenTrust -# Label: "OpenTrust Root CA G3" -# Serial: 1492104908271485653071219941864171170455615 -# MD5 Fingerprint: 21:37:b4:17:16:92:7b:67:46:70:a9:96:d7:a8:13:24 -# SHA1 Fingerprint: 6e:26:64:f3:56:bf:34:55:bf:d1:93:3f:7c:01:de:d8:13:da:8a:a6 -# SHA256 Fingerprint: b7:c3:62:31:70:6e:81:07:8c:36:7c:b8:96:19:8f:1e:32:08:dd:92:69:49:dd:8f:57:09:a4:10:f7:5b:62:92 ------BEGIN CERTIFICATE----- -MIICITCCAaagAwIBAgISESDm+Ez8JLC+BUCs2oMbNGA/MAoGCCqGSM49BAMDMEAx 
-CzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlPcGVuVHJ1c3QxHTAbBgNVBAMMFE9wZW5U -cnVzdCBSb290IENBIEczMB4XDTE0MDUyNjAwMDAwMFoXDTM4MDExNTAwMDAwMFow -QDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCU9wZW5UcnVzdDEdMBsGA1UEAwwUT3Bl -blRydXN0IFJvb3QgQ0EgRzMwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAARK7liuTcpm -3gY6oxH84Bjwbhy6LTAMidnW7ptzg6kjFYwvWYpa3RTqnVkrQ7cG7DK2uu5Bta1d -oYXM6h0UZqNnfkbilPPntlahFVmhTzeXuSIevRHr9LIfXsMUmuXZl5mjYzBhMA4G -A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRHd8MUi2I5 -DMlv4VBN0BBY3JWIbTAfBgNVHSMEGDAWgBRHd8MUi2I5DMlv4VBN0BBY3JWIbTAK -BggqhkjOPQQDAwNpADBmAjEAj6jcnboMBBf6Fek9LykBl7+BFjNAk2z8+e2AcG+q -j9uEwov1NcoG3GRvaBbhj5G5AjEA2Euly8LQCGzpGPta3U1fJAuwACEl74+nBCZx -4nxp5V2a+EEfOzmTk51V6s2N8fvB ------END CERTIFICATE----- - # Issuer: CN=ISRG Root X1 O=Internet Security Research Group # Subject: CN=ISRG Root X1 O=Internet Security Research Group # Label: "ISRG Root X1" @@ -4398,3 +4235,66 @@ MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg== -----END CERTIFICATE----- + +# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 +# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6 +# Label: "GlobalSign Root CA - R6" +# Serial: 1417766617973444989252670301619537 +# MD5 Fingerprint: 4f:dd:07:e4:d4:22:64:39:1e:0c:37:42:ea:d1:c6:ae +# SHA1 Fingerprint: 80:94:64:0e:b5:a7:a1:ca:11:9c:1f:dd:d5:9f:81:02:63:a7:fb:d1 +# SHA256 Fingerprint: 2c:ab:ea:fe:37:d0:6c:a2:2a:ba:73:91:c0:03:3d:25:98:29:52:c4:53:64:73:49:76:3a:3a:b5:ad:6c:cf:69 +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIORea7A4Mzw4VlSOb/RVEwDQYJKoZIhvcNAQEMBQAwTDEg +MB4GA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjYxEzARBgNVBAoTCkdsb2Jh +bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTQxMjEwMDAwMDAwWhcNMzQx +MjEwMDAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSNjET +MBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAJUH6HPKZvnsFMp7PPcNCPG0RQssgrRI +xutbPK6DuEGSMxSkb3/pKszGsIhrxbaJ0cay/xTOURQh7ErdG1rG1ofuTToVBu1k +ZguSgMpE3nOUTvOniX9PeGMIyBJQbUJmL025eShNUhqKGoC3GYEOfsSKvGRMIRxD +aNc9PIrFsmbVkJq3MQbFvuJtMgamHvm566qjuL++gmNQ0PAYid/kD3n16qIfKtJw +LnvnvJO7bVPiSHyMEAc4/2ayd2F+4OqMPKq0pPbzlUoSB239jLKJz9CgYXfIWHSw +1CM69106yqLbnQneXUQtkPGBzVeS+n68UARjNN9rkxi+azayOeSsJDa38O+2HBNX +k7besvjihbdzorg1qkXy4J02oW9UivFyVm4uiMVRQkQVlO6jxTiWm05OWgtH8wY2 +SXcwvHE35absIQh1/OZhFj931dmRl4QKbNQCTXTAFO39OfuD8l4UoQSwC+n+7o/h +bguyCLNhZglqsQY6ZZZZwPA1/cnaKI0aEYdwgQqomnUdnjqGBQCe24DWJfncBZ4n +WUx2OVvq+aWh2IMP0f/fMBH5hc8zSPXKbWQULHpYT9NLCEnFlWQaYw55PfWzjMpY +rZxCRXluDocZXFSxZba/jJvcE+kNb7gu3GduyYsRtYQUigAZcIN5kZeR1Bonvzce +MgfYFGM8KEyvAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBSubAWjkxPioufi1xzWx/B/yGdToDAfBgNVHSMEGDAWgBSu +bAWjkxPioufi1xzWx/B/yGdToDANBgkqhkiG9w0BAQwFAAOCAgEAgyXt6NH9lVLN +nsAEoJFp5lzQhN7craJP6Ed41mWYqVuoPId8AorRbrcWc+ZfwFSY1XS+wc3iEZGt +Ixg93eFyRJa0lV7Ae46ZeBZDE1ZXs6KzO7V33EByrKPrmzU+sQghoefEQzd5Mr61 +55wsTLxDKZmOMNOsIeDjHfrYBzN2VAAiKrlNIC5waNrlU/yDXNOd8v9EDERm8tLj +vUYAGm0CuiVdjaExUd1URhxN25mW7xocBFymFe944Hn+Xds+qkxV/ZoVqW/hpvvf +cDDpw+5CRu3CkwWJ+n1jez/QcYF8AOiYrg54NMMl+68KnyBr3TsTjxKM4kEaSHpz +oHdpx7Zcf4LIHv5YGygrqGytXm3ABdJ7t+uA/iU3/gKbaKxCXcPu9czc8FB10jZp +nOZ7BN9uBmm23goJSFmH63sUYHpkqmlD75HHTOwY3WzvUy2MmeFe8nI+z1TIvWfs +pA9MRf/TuTAjB0yPEL+GltmZWrSZVxykzLsViVO6LAUP5MSeGbEYNNVMnbrt9x+v +JJUEeKgDu+6B5dpffItKoZB0JaezPkvILFa9x8jvOOJckvB595yEunQtYQEgfn7R +8k8HWV+LLUNS60YMlOH1Zkd5d9VUWx+tJDfLRVpOoERIyNiwmcUVhAn21klJwGW4 
+5hpxbqCo8YLoRT5s1gLXCmeDBVrJpBA= +-----END CERTIFICATE----- + +# Issuer: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed +# Subject: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed +# Label: "OISTE WISeKey Global Root GC CA" +# Serial: 44084345621038548146064804565436152554 +# MD5 Fingerprint: a9:d6:b9:2d:2f:93:64:f8:a5:69:ca:91:e9:68:07:23 +# SHA1 Fingerprint: e0:11:84:5e:34:de:be:88:81:b9:9c:f6:16:26:d1:96:1f:c3:b9:31 +# SHA256 Fingerprint: 85:60:f9:1c:36:24:da:ba:95:70:b5:fe:a0:db:e3:6f:f1:1a:83:23:be:94:86:85:4f:b3:f3:4a:55:71:19:8d +-----BEGIN CERTIFICATE----- +MIICaTCCAe+gAwIBAgIQISpWDK7aDKtARb8roi066jAKBggqhkjOPQQDAzBtMQsw +CQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUgRm91 +bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwg +Um9vdCBHQyBDQTAeFw0xNzA1MDkwOTQ4MzRaFw00MjA1MDkwOTU4MzNaMG0xCzAJ +BgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBGb3Vu +ZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2JhbCBS +b290IEdDIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAETOlQwMYPchi82PG6s4ni +eUqjFqdrVCTbUf/q9Akkwwsin8tqJ4KBDdLArzHkdIJuyiXZjHWd8dvQmqJLIX4W +p2OQ0jnUsYd4XxiWD1AbNTcPasbc2RNNpI6QN+a9WzGRo1QwUjAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUSIcUrOPDnpBgOtfKie7T +rYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0EAwMDaAAwZQIwJsdpW9zV +57LnyAyMjMPdeYwbY9XJUpROTYJKcx6ygISpJcBMWm1JKWB4E+J+SOtkAjEA2zQg +Mgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9 +-----END CERTIFICATE----- diff --git a/thesisenv/lib/python3.6/site-packages/pip/_vendor/packaging/__about__.py b/thesisenv/lib/python3.6/site-packages/pip/_vendor/packaging/__about__.py index 4255c5b..21fc6ce 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_vendor/packaging/__about__.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_vendor/packaging/__about__.py @@ -12,10 +12,10 @@ __title__ = "packaging" __summary__ = "Core utilities for Python packages" __uri__ = "https://github.com/pypa/packaging" -__version__ = "17.1" +__version__ = "18.0" __author__ = "Donald Stufft and individual contributors" __email__ = "donald@stufft.io" __license__ = "BSD or Apache License, Version 2.0" -__copyright__ = "Copyright 2014-2016 %s" % __author__ +__copyright__ = "Copyright 2014-2018 %s" % __author__ diff --git a/thesisenv/lib/python3.6/site-packages/pip/_vendor/packaging/requirements.py b/thesisenv/lib/python3.6/site-packages/pip/_vendor/packaging/requirements.py index 2760483..d40bd8c 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_vendor/packaging/requirements.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_vendor/packaging/requirements.py @@ -92,16 +92,16 @@ class Requirement(object): try: req = REQUIREMENT.parseString(requirement_string) except ParseException as e: - raise InvalidRequirement( - "Invalid requirement, parse error at \"{0!r}\"".format( - requirement_string[e.loc:e.loc + 8])) + raise InvalidRequirement("Parse error at \"{0!r}\": {1}".format( + requirement_string[e.loc:e.loc + 8], e.msg + )) self.name = req.name if req.url: parsed_url = urlparse.urlparse(req.url) if not (parsed_url.scheme and parsed_url.netloc) or ( not parsed_url.scheme and not parsed_url.netloc): - raise InvalidRequirement("Invalid URL given") + raise InvalidRequirement("Invalid URL: {0}".format(req.url)) self.url = req.url else: self.url = None diff --git a/thesisenv/lib/python3.6/site-packages/pip/_vendor/packaging/specifiers.py b/thesisenv/lib/python3.6/site-packages/pip/_vendor/packaging/specifiers.py index 9b6353f..4c79899 100644 --- 
a/thesisenv/lib/python3.6/site-packages/pip/_vendor/packaging/specifiers.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_vendor/packaging/specifiers.py @@ -503,7 +503,7 @@ class Specifier(_IndividualSpecifier): return False # Ensure that we do not allow a local version of the version mentioned - # in the specifier, which is techincally greater than, to match. + # in the specifier, which is technically greater than, to match. if prospective.local is not None: if Version(prospective.base_version) == Version(spec.base_version): return False diff --git a/thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/__init__.py b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/__init__.py new file mode 100644 index 0000000..8beedea --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/__init__.py @@ -0,0 +1,4 @@ +"""Wrappers to build Python packages using PEP 517 hooks +""" + +__version__ = '0.2' diff --git a/thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/_in_process.py b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/_in_process.py new file mode 100644 index 0000000..baa14d3 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/_in_process.py @@ -0,0 +1,182 @@ +"""This is invoked in a subprocess to call the build backend hooks. + +It expects: +- Command line args: hook_name, control_dir +- Environment variable: PEP517_BUILD_BACKEND=entry.point:spec +- control_dir/input.json: + - {"kwargs": {...}} + +Results: +- control_dir/output.json + - {"return_val": ...} +""" +from glob import glob +from importlib import import_module +import os +from os.path import join as pjoin +import re +import shutil +import sys + +# This is run as a script, not a module, so it can't do a relative import +import compat + +def _build_backend(): + """Find and load the build backend""" + ep = os.environ['PEP517_BUILD_BACKEND'] + mod_path, _, obj_path = ep.partition(':') + obj = import_module(mod_path) + if obj_path: + for path_part in obj_path.split('.'): + obj = getattr(obj, path_part) + return obj + +def get_requires_for_build_wheel(config_settings): + """Invoke the optional get_requires_for_build_wheel hook + + Returns [] if the hook is not defined. + """ + backend = _build_backend() + try: + hook = backend.get_requires_for_build_wheel + except AttributeError: + return [] + else: + return hook(config_settings) + +def prepare_metadata_for_build_wheel(metadata_directory, config_settings): + """Invoke optional prepare_metadata_for_build_wheel + + Implements a fallback by building a wheel if the hook isn't defined. + """ + backend = _build_backend() + try: + hook = backend.prepare_metadata_for_build_wheel + except AttributeError: + return _get_wheel_metadata_from_wheel(backend, metadata_directory, + config_settings) + else: + return hook(metadata_directory, config_settings) + +WHEEL_BUILT_MARKER = 'PEP517_ALREADY_BUILT_WHEEL' + +def _dist_info_files(whl_zip): + """Identify the .dist-info folder inside a wheel ZipFile.""" + res = [] + for path in whl_zip.namelist(): + m = re.match(r'[^/\\]+-[^/\\]+\.dist-info/', path) + if m: + res.append(path) + if res: + return res + raise Exception("No .dist-info folder found in wheel") + +def _get_wheel_metadata_from_wheel(backend, metadata_directory, config_settings): + """Build a wheel and extract the metadata from it. + + Fallback for when the build backend does not define the 'get_wheel_metadata' + hook. 
+ """ + from zipfile import ZipFile + whl_basename = backend.build_wheel(metadata_directory, config_settings) + with open(os.path.join(metadata_directory, WHEEL_BUILT_MARKER), 'wb'): + pass # Touch marker file + + whl_file = os.path.join(metadata_directory, whl_basename) + with ZipFile(whl_file) as zipf: + dist_info = _dist_info_files(zipf) + zipf.extractall(path=metadata_directory, members=dist_info) + return dist_info[0].split('/')[0] + +def _find_already_built_wheel(metadata_directory): + """Check for a wheel already built during the get_wheel_metadata hook. + """ + if not metadata_directory: + return None + metadata_parent = os.path.dirname(metadata_directory) + if not os.path.isfile(pjoin(metadata_parent, WHEEL_BUILT_MARKER)): + return None + + whl_files = glob(os.path.join(metadata_parent, '*.whl')) + if not whl_files: + print('Found wheel built marker, but no .whl files') + return None + if len(whl_files) > 1: + print('Found multiple .whl files; unspecified behaviour. ' + 'Will call build_wheel.') + return None + + # Exactly one .whl file + return whl_files[0] + +def build_wheel(wheel_directory, config_settings, metadata_directory=None): + """Invoke the mandatory build_wheel hook. + + If a wheel was already built in the prepare_metadata_for_build_wheel fallback, this + will copy it rather than rebuilding the wheel. + """ + prebuilt_whl = _find_already_built_wheel(metadata_directory) + if prebuilt_whl: + shutil.copy2(prebuilt_whl, wheel_directory) + return os.path.basename(prebuilt_whl) + + return _build_backend().build_wheel(wheel_directory, config_settings, + metadata_directory) + + +def get_requires_for_build_sdist(config_settings): + """Invoke the optional get_requires_for_build_wheel hook + + Returns [] if the hook is not defined. + """ + backend = _build_backend() + try: + hook = backend.get_requires_for_build_sdist + except AttributeError: + return [] + else: + return hook(config_settings) + +class _DummyException(Exception): + """Nothing should ever raise this exception""" + +class GotUnsupportedOperation(Exception): + """For internal use when backend raises UnsupportedOperation""" + +def build_sdist(sdist_directory, config_settings): + """Invoke the mandatory build_sdist hook.""" + backend = _build_backend() + try: + return backend.build_sdist(sdist_directory, config_settings) + except getattr(backend, 'UnsupportedOperation', _DummyException): + raise GotUnsupportedOperation + +HOOK_NAMES = { + 'get_requires_for_build_wheel', + 'prepare_metadata_for_build_wheel', + 'build_wheel', + 'get_requires_for_build_sdist', + 'build_sdist', +} + +def main(): + if len(sys.argv) < 3: + sys.exit("Needs args: hook_name, control_dir") + hook_name = sys.argv[1] + control_dir = sys.argv[2] + if hook_name not in HOOK_NAMES: + sys.exit("Unknown hook: %s" % hook_name) + hook = globals()[hook_name] + + hook_input = compat.read_json(pjoin(control_dir, 'input.json')) + + json_out = {'unsupported': False, 'return_val': None} + try: + json_out['return_val'] = hook(**hook_input['kwargs']) + except GotUnsupportedOperation: + json_out['unsupported'] = True + + compat.write_json(json_out, pjoin(control_dir, 'output.json'), indent=2) + +if __name__ == '__main__': + main() diff --git a/thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/check.py b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/check.py new file mode 100644 index 0000000..c65d51c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/check.py @@ -0,0 +1,194 @@ +"""Check a project and backend by 
attempting to build using PEP 517 hooks. +""" +import argparse +import logging +import os +from os.path import isfile, join as pjoin +from pip._vendor.pytoml import TomlError, load as toml_load +import shutil +from subprocess import CalledProcessError +import sys +import tarfile +from tempfile import mkdtemp +import zipfile + +from .colorlog import enable_colourful_output +from .envbuild import BuildEnvironment +from .wrappers import Pep517HookCaller + +log = logging.getLogger(__name__) + +def check_build_sdist(hooks): + with BuildEnvironment() as env: + try: + env.pip_install(hooks.build_sys_requires) + log.info('Installed static build dependencies') + except CalledProcessError: + log.error('Failed to install static build dependencies') + return False + + try: + reqs = hooks.get_requires_for_build_sdist({}) + log.info('Got build requires: %s', reqs) + except: + log.error('Failure in get_requires_for_build_sdist', exc_info=True) + return False + + try: + env.pip_install(reqs) + log.info('Installed dynamic build dependencies') + except CalledProcessError: + log.error('Failed to install dynamic build dependencies') + return False + + td = mkdtemp() + log.info('Trying to build sdist in %s', td) + try: + try: + filename = hooks.build_sdist(td, {}) + log.info('build_sdist returned %r', filename) + except: + log.info('Failure in build_sdist', exc_info=True) + return False + + if not filename.endswith('.tar.gz'): + log.error("Filename %s doesn't have .tar.gz extension", filename) + return False + + path = pjoin(td, filename) + if isfile(path): + log.info("Output file %s exists", path) + else: + log.error("Output file %s does not exist", path) + return False + + if tarfile.is_tarfile(path): + log.info("Output file is a tar file") + else: + log.error("Output file is not a tar file") + return False + + finally: + shutil.rmtree(td) + + return True + +def check_build_wheel(hooks): + with BuildEnvironment() as env: + try: + env.pip_install(hooks.build_sys_requires) + log.info('Installed static build dependencies') + except CalledProcessError: + log.error('Failed to install static build dependencies') + return False + + try: + reqs = hooks.get_requires_for_build_wheel({}) + log.info('Got build requires: %s', reqs) + except: + log.error('Failure in get_requires_for_build_sdist', exc_info=True) + return False + + try: + env.pip_install(reqs) + log.info('Installed dynamic build dependencies') + except CalledProcessError: + log.error('Failed to install dynamic build dependencies') + return False + + td = mkdtemp() + log.info('Trying to build wheel in %s', td) + try: + try: + filename = hooks.build_wheel(td, {}) + log.info('build_wheel returned %r', filename) + except: + log.info('Failure in build_wheel', exc_info=True) + return False + + if not filename.endswith('.whl'): + log.error("Filename %s doesn't have .whl extension", filename) + return False + + path = pjoin(td, filename) + if isfile(path): + log.info("Output file %s exists", path) + else: + log.error("Output file %s does not exist", path) + return False + + if zipfile.is_zipfile(path): + log.info("Output file is a zip file") + else: + log.error("Output file is not a zip file") + return False + + finally: + shutil.rmtree(td) + + return True + + +def check(source_dir): + pyproject = pjoin(source_dir, 'pyproject.toml') + if isfile(pyproject): + log.info('Found pyproject.toml') + else: + log.error('Missing pyproject.toml') + return False + + try: + with open(pyproject) as f: + pyproject_data = toml_load(f) + # Ensure the mandatory data can be loaded + 
buildsys = pyproject_data['build-system'] + requires = buildsys['requires'] + backend = buildsys['build-backend'] + log.info('Loaded pyproject.toml') + except (TomlError, KeyError): + log.error("Invalid pyproject.toml", exc_info=True) + return False + + hooks = Pep517HookCaller(source_dir, backend) + + sdist_ok = check_build_sdist(hooks) + wheel_ok = check_build_wheel(hooks) + + if not sdist_ok: + log.warning('Sdist checks failed; scroll up to see') + if not wheel_ok: + log.warning('Wheel checks failed') + + return sdist_ok + + +def main(argv=None): + ap = argparse.ArgumentParser() + ap.add_argument('source_dir', + help="A directory containing pyproject.toml") + args = ap.parse_args(argv) + + enable_colourful_output() + + ok = check(args.source_dir) + + if ok: + print(ansi('Checks passed', 'green')) + else: + print(ansi('Checks failed', 'red')) + sys.exit(1) + +ansi_codes = { + 'reset': '\x1b[0m', + 'bold': '\x1b[1m', + 'red': '\x1b[31m', + 'green': '\x1b[32m', +} +def ansi(s, attr): + if os.name != 'nt' and sys.stdout.isatty(): + return ansi_codes[attr] + str(s) + ansi_codes['reset'] + else: + return str(s) + +if __name__ == '__main__': + main() diff --git a/thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/colorlog.py b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/colorlog.py new file mode 100644 index 0000000..26cf748 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/colorlog.py @@ -0,0 +1,110 @@ +"""Nicer log formatting with colours. + +Code copied from Tornado, Apache licensed. +""" +# Copyright 2012 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging +import sys + +try: + import curses +except ImportError: + curses = None + +def _stderr_supports_color(): + color = False + if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty(): + try: + curses.setupterm() + if curses.tigetnum("colors") > 0: + color = True + except Exception: + pass + return color + +class LogFormatter(logging.Formatter): + """Log formatter with colour support + """ + DEFAULT_COLORS = { + logging.INFO: 2, # Green + logging.WARNING: 3, # Yellow + logging.ERROR: 1, # Red + logging.CRITICAL: 1, + } + + def __init__(self, color=True, datefmt=None): + r""" + :arg bool color: Enables color support. + :arg string fmt: Log message format. + It will be applied to the attributes dict of log records. The + text between ``%(color)s`` and ``%(end_color)s`` will be colored + depending on the level if color support is on. + :arg dict colors: color mappings from logging level to terminal color + code + :arg string datefmt: Datetime format. + Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``. + .. versionchanged:: 3.2 + Added ``fmt`` and ``datefmt`` arguments. + """ + logging.Formatter.__init__(self, datefmt=datefmt) + self._colors = {} + if color and _stderr_supports_color(): + # The curses module has some str/bytes confusion in + # python3. Until version 3.2.3, most methods return + # bytes, but only accept strings. 
In addition, we want to + # output these strings with the logging module, which + # works with unicode strings. The explicit calls to + # unicode() below are harmless in python2 but will do the + # right conversion in python 3. + fg_color = (curses.tigetstr("setaf") or + curses.tigetstr("setf") or "") + if (3, 0) < sys.version_info < (3, 2, 3): + fg_color = str(fg_color, "ascii") + + for levelno, code in self.DEFAULT_COLORS.items(): + self._colors[levelno] = str(curses.tparm(fg_color, code), "ascii") + self._normal = str(curses.tigetstr("sgr0"), "ascii") + + scr = curses.initscr() + self.termwidth = scr.getmaxyx()[1] + curses.endwin() + else: + self._normal = '' + # Default width is usually 80, but too wide is worse than too narrow + self.termwidth = 70 + + def formatMessage(self, record): + l = len(record.message) + right_text = '{initial}-{name}'.format(initial=record.levelname[0], + name=record.name) + if l + len(right_text) < self.termwidth: + space = ' ' * (self.termwidth - (l + len(right_text))) + else: + space = ' ' + + if record.levelno in self._colors: + start_color = self._colors[record.levelno] + end_color = self._normal + else: + start_color = end_color = '' + + return record.message + space + start_color + right_text + end_color + +def enable_colourful_output(level=logging.INFO): + handler = logging.StreamHandler() + handler.setFormatter(LogFormatter()) + logging.root.addHandler(handler) + logging.root.setLevel(level) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/compat.py b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/compat.py new file mode 100644 index 0000000..01c66fc --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/compat.py @@ -0,0 +1,23 @@ +"""Handle reading and writing JSON in UTF-8, on Python 3 and 2.""" +import json +import sys + +if sys.version_info[0] >= 3: + # Python 3 + def write_json(obj, path, **kwargs): + with open(path, 'w', encoding='utf-8') as f: + json.dump(obj, f, **kwargs) + + def read_json(path): + with open(path, 'r', encoding='utf-8') as f: + return json.load(f) + +else: + # Python 2 + def write_json(obj, path, **kwargs): + with open(path, 'wb') as f: + json.dump(obj, f, encoding='utf-8', **kwargs) + + def read_json(path): + with open(path, 'rb') as f: + return json.load(f) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/envbuild.py b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/envbuild.py new file mode 100644 index 0000000..c264f46 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/envbuild.py @@ -0,0 +1,150 @@ +"""Build wheels/sdists by installing build deps to a temporary environment. +""" + +import os +import logging +from pip._vendor import pytoml +import shutil +from subprocess import check_call +import sys +from sysconfig import get_paths +from tempfile import mkdtemp + +from .wrappers import Pep517HookCaller + +log = logging.getLogger(__name__) + +def _load_pyproject(source_dir): + with open(os.path.join(source_dir, 'pyproject.toml')) as f: + pyproject_data = pytoml.load(f) + buildsys = pyproject_data['build-system'] + return buildsys['requires'], buildsys['build-backend'] + + +class BuildEnvironment(object): + """Context manager to install build deps in a simple temporary environment + + Based on code I wrote for pip, which is MIT licensed. 
+ """ + # Copyright (c) 2008-2016 The pip developers (see AUTHORS.txt file) + # + # Permission is hereby granted, free of charge, to any person obtaining + # a copy of this software and associated documentation files (the + # "Software"), to deal in the Software without restriction, including + # without limitation the rights to use, copy, modify, merge, publish, + # distribute, sublicense, and/or sell copies of the Software, and to + # permit persons to whom the Software is furnished to do so, subject to + # the following conditions: + # + # The above copyright notice and this permission notice shall be + # included in all copies or substantial portions of the Software. + # + # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + path = None + + def __init__(self, cleanup=True): + self._cleanup = cleanup + + def __enter__(self): + self.path = mkdtemp(prefix='pep517-build-env-') + log.info('Temporary build environment: %s', self.path) + + self.save_path = os.environ.get('PATH', None) + self.save_pythonpath = os.environ.get('PYTHONPATH', None) + + install_scheme = 'nt' if (os.name == 'nt') else 'posix_prefix' + install_dirs = get_paths(install_scheme, vars={ + 'base': self.path, + 'platbase': self.path, + }) + + scripts = install_dirs['scripts'] + if self.save_path: + os.environ['PATH'] = scripts + os.pathsep + self.save_path + else: + os.environ['PATH'] = scripts + os.pathsep + os.defpath + + if install_dirs['purelib'] == install_dirs['platlib']: + lib_dirs = install_dirs['purelib'] + else: + lib_dirs = install_dirs['purelib'] + os.pathsep + \ + install_dirs['platlib'] + if self.save_pythonpath: + os.environ['PYTHONPATH'] = lib_dirs + os.pathsep + \ + self.save_pythonpath + else: + os.environ['PYTHONPATH'] = lib_dirs + + return self + + def pip_install(self, reqs): + """Install dependencies into this env by calling pip in a subprocess""" + if not reqs: + return + log.info('Calling pip to install %s', reqs) + check_call([sys.executable, '-m', 'pip', 'install', '--ignore-installed', + '--prefix', self.path] + list(reqs)) + + def __exit__(self, exc_type, exc_val, exc_tb): + if self._cleanup and (self.path is not None) and os.path.isdir(self.path): + shutil.rmtree(self.path) + + if self.save_path is None: + os.environ.pop('PATH', None) + else: + os.environ['PATH'] = self.save_path + + if self.save_pythonpath is None: + os.environ.pop('PYTHONPATH', None) + else: + os.environ['PYTHONPATH'] = self.save_pythonpath + +def build_wheel(source_dir, wheel_dir, config_settings=None): + """Build a wheel from a source directory using PEP 517 hooks. + + :param str source_dir: Source directory containing pyproject.toml + :param str wheel_dir: Target directory to create wheel in + :param dict config_settings: Options to pass to build backend + + This is a blocking function which will run pip in a subprocess to install + build requirements. 
+ """ + if config_settings is None: + config_settings = {} + requires, backend = _load_pyproject(source_dir) + hooks = Pep517HookCaller(source_dir, backend) + + with BuildEnvironment() as env: + env.pip_install(requires) + reqs = hooks.get_requires_for_build_wheel(config_settings) + env.pip_install(reqs) + return hooks.build_wheel(wheel_dir, config_settings) + + +def build_sdist(source_dir, sdist_dir, config_settings=None): + """Build an sdist from a source directory using PEP 517 hooks. + + :param str source_dir: Source directory containing pyproject.toml + :param str sdist_dir: Target directory to place sdist in + :param dict config_settings: Options to pass to build backend + + This is a blocking function which will run pip in a subprocess to install + build requirements. + """ + if config_settings is None: + config_settings = {} + requires, backend = _load_pyproject(source_dir) + hooks = Pep517HookCaller(source_dir, backend) + + with BuildEnvironment() as env: + env.pip_install(requires) + reqs = hooks.get_requires_for_build_sdist(config_settings) + env.pip_install(reqs) + return hooks.build_sdist(sdist_dir, config_settings) diff --git a/thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/wrappers.py b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/wrappers.py new file mode 100644 index 0000000..28260f3 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pep517/wrappers.py @@ -0,0 +1,134 @@ +from contextlib import contextmanager +import os +from os.path import dirname, abspath, join as pjoin +import shutil +from subprocess import check_call +import sys +from tempfile import mkdtemp + +from . import compat + +_in_proc_script = pjoin(dirname(abspath(__file__)), '_in_process.py') + +@contextmanager +def tempdir(): + td = mkdtemp() + try: + yield td + finally: + shutil.rmtree(td) + +class UnsupportedOperation(Exception): + """May be raised by build_sdist if the backend indicates that it can't.""" + +class Pep517HookCaller(object): + """A wrapper around a source directory to be built with a PEP 517 backend. + + source_dir : The path to the source directory, containing pyproject.toml. + backend : The build backend spec, as per PEP 517, from pyproject.toml. + """ + def __init__(self, source_dir, build_backend): + self.source_dir = abspath(source_dir) + self.build_backend = build_backend + + def get_requires_for_build_wheel(self, config_settings=None): + """Identify packages required for building a wheel + + Returns a list of dependency specifications, e.g.: + ["wheel >= 0.25", "setuptools"] + + This does not include requirements specified in pyproject.toml. + It returns the result of calling the equivalently named hook in a + subprocess. + """ + return self._call_hook('get_requires_for_build_wheel', { + 'config_settings': config_settings + }) + + def prepare_metadata_for_build_wheel(self, metadata_directory, config_settings=None): + """Prepare a *.dist-info folder with metadata for this project. + + Returns the name of the newly created folder. + + If the build backend defines a hook with this name, it will be called + in a subprocess. If not, the backend will be asked to build a wheel, + and the dist-info extracted from that. + """ + return self._call_hook('prepare_metadata_for_build_wheel', { + 'metadata_directory': abspath(metadata_directory), + 'config_settings': config_settings, + }) + + def build_wheel(self, wheel_directory, config_settings=None, metadata_directory=None): + """Build a wheel from this project. 
+ + Returns the name of the newly created file. + + In general, this will call the 'build_wheel' hook in the backend. + However, if that was previously called by + 'prepare_metadata_for_build_wheel', and the same metadata_directory is + used, the previously built wheel will be copied to wheel_directory. + """ + if metadata_directory is not None: + metadata_directory = abspath(metadata_directory) + return self._call_hook('build_wheel', { + 'wheel_directory': abspath(wheel_directory), + 'config_settings': config_settings, + 'metadata_directory': metadata_directory, + }) + + def get_requires_for_build_sdist(self, config_settings=None): + """Identify packages required for building a wheel + + Returns a list of dependency specifications, e.g.: + ["setuptools >= 26"] + + This does not include requirements specified in pyproject.toml. + It returns the result of calling the equivalently named hook in a + subprocess. + """ + return self._call_hook('get_requires_for_build_sdist', { + 'config_settings': config_settings + }) + + def build_sdist(self, sdist_directory, config_settings=None): + """Build an sdist from this project. + + Returns the name of the newly created file. + + This calls the 'build_sdist' backend hook in a subprocess. + """ + return self._call_hook('build_sdist', { + 'sdist_directory': abspath(sdist_directory), + 'config_settings': config_settings, + }) + + + def _call_hook(self, hook_name, kwargs): + env = os.environ.copy() + + # On Python 2, pytoml returns Unicode values (which is correct) but the + # environment passed to check_call needs to contain string values. We + # convert here by encoding using ASCII (the backend can only contain + # letters, digits and _, . and : characters, and will be used as a + # Python identifier, so non-ASCII content is wrong on Python 2 in + # any case). 
+ if sys.version_info[0] == 2: + build_backend = self.build_backend.encode('ASCII') + else: + build_backend = self.build_backend + + env['PEP517_BUILD_BACKEND'] = build_backend + with tempdir() as td: + compat.write_json({'kwargs': kwargs}, pjoin(td, 'input.json'), + indent=2) + + # Run the hook in a subprocess + check_call([sys.executable, _in_proc_script, hook_name, td], + cwd=self.source_dir, env=env) + + data = compat.read_json(pjoin(td, 'output.json')) + if data.get('unsupported'): + raise UnsupportedOperation + return data['return_val'] + diff --git a/thesisenv/lib/python3.6/site-packages/pip/_vendor/pkg_resources/__init__.py b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pkg_resources/__init__.py index f2815cc..0b432f6 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_vendor/pkg_resources/__init__.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pkg_resources/__init__.py @@ -47,6 +47,11 @@ except ImportError: # Python 3.2 compatibility import imp as _imp +try: + FileExistsError +except NameError: + FileExistsError = OSError + from pip._vendor import six from pip._vendor.six.moves import urllib, map, filter @@ -78,8 +83,11 @@ __import__('pip._vendor.packaging.requirements') __import__('pip._vendor.packaging.markers') -if (3, 0) < sys.version_info < (3, 3): - raise RuntimeError("Python 3.3 or later is required") +__metaclass__ = type + + +if (3, 0) < sys.version_info < (3, 4): + raise RuntimeError("Python 3.4 or later is required") if six.PY2: # Those builtin exceptions are only defined in Python 3 @@ -537,7 +545,7 @@ class IResourceProvider(IMetadataProvider): """List of resource names in the directory (like ``os.listdir()``)""" -class WorkingSet(object): +class WorkingSet: """A collection of active distributions on sys.path (or a similar list)""" def __init__(self, entries=None): @@ -637,13 +645,12 @@ class WorkingSet(object): distributions in the working set, otherwise only ones matching both `group` and `name` are yielded (in distribution order). """ - for dist in self: - entries = dist.get_entry_map(group) - if name is None: - for ep in entries.values(): - yield ep - elif name in entries: - yield entries[name] + return ( + entry + for dist in self + for entry in dist.get_entry_map(group).values() + if name is None or name == entry.name + ) def run_script(self, requires, script_name): """Locate distribution for `requires` and run `script_name` script""" @@ -944,7 +951,7 @@ class _ReqExtras(dict): return not req.marker or any(extra_evals) -class Environment(object): +class Environment: """Searchable snapshot of distributions on a search path""" def __init__( @@ -959,7 +966,7 @@ class Environment(object): `platform` is an optional string specifying the name of the platform that platform-specific distributions must be compatible with. If unspecified, it defaults to the current platform. `python` is an - optional string naming the desired version of Python (e.g. ``'3.3'``); + optional string naming the desired version of Python (e.g. ``'3.6'``); it defaults to the current version. 
You may explicitly set `platform` (and/or `python`) to ``None`` if you @@ -2087,7 +2094,12 @@ def _handle_ns(packageName, path_item): importer = get_importer(path_item) if importer is None: return None - loader = importer.find_module(packageName) + + # capture warnings due to #1111 + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + loader = importer.find_module(packageName) + if loader is None: return None module = sys.modules.get(packageName) @@ -2132,12 +2144,13 @@ def _rebuild_mod_path(orig_path, package_name, module): parts = path_parts[:-module_parts] return safe_sys_path_index(_normalize_cached(os.sep.join(parts))) - if not isinstance(orig_path, list): - # Is this behavior useful when module.__path__ is not a list? - return + new_path = sorted(orig_path, key=position_in_sys_path) + new_path = [_normalize_cached(p) for p in new_path] - orig_path.sort(key=position_in_sys_path) - module.__path__[:] = [_normalize_cached(p) for p in orig_path] + if isinstance(module.__path__, list): + module.__path__[:] = new_path + else: + module.__path__ = new_path def declare_namespace(packageName): @@ -2148,9 +2161,10 @@ def declare_namespace(packageName): if packageName in _namespace_packages: return - path, parent = sys.path, None - if '.' in packageName: - parent = '.'.join(packageName.split('.')[:-1]) + path = sys.path + parent, _, _ = packageName.rpartition('.') + + if parent: declare_namespace(parent) if parent not in _namespace_packages: __import__(parent) @@ -2161,7 +2175,7 @@ def declare_namespace(packageName): # Track what packages are namespaces, so when new path items are added, # they can be updated - _namespace_packages.setdefault(parent, []).append(packageName) + _namespace_packages.setdefault(parent or None, []).append(packageName) _namespace_packages.setdefault(packageName, []) for path_item in path: @@ -2279,7 +2293,7 @@ EGG_NAME = re.compile( ).match -class EntryPoint(object): +class EntryPoint: """Object representing an advertised importable object""" def __init__(self, name, module_name, attrs=(), extras=(), dist=None): @@ -2433,7 +2447,7 @@ def _version_from_file(lines): return safe_version(value.strip()) or None -class Distribution(object): +class Distribution: """Wrap an actual or potential sys.path entry w/metadata""" PKG_INFO = 'PKG-INFO' @@ -3027,7 +3041,10 @@ def _bypass_ensure_directory(path): dirname, filename = split(path) if dirname and filename and not isdir(dirname): _bypass_ensure_directory(dirname) - mkdir(dirname, 0o755) + try: + mkdir(dirname, 0o755) + except FileExistsError: + pass def split_sections(s): diff --git a/thesisenv/lib/python3.6/site-packages/pip/_vendor/pkg_resources/py31compat.py b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pkg_resources/py31compat.py index 331a51b..a2d3007 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_vendor/pkg_resources/py31compat.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pkg_resources/py31compat.py @@ -2,6 +2,8 @@ import os import errno import sys +from pip._vendor import six + def _makedirs_31(path, exist_ok=False): try: @@ -15,8 +17,7 @@ def _makedirs_31(path, exist_ok=False): # and exists_ok considerations are disentangled. 
# See https://github.com/pypa/setuptools/pull/1083#issuecomment-315168663 needs_makedirs = ( - sys.version_info < (3, 2, 5) or - (3, 3) <= sys.version_info < (3, 3, 6) or + six.PY2 or (3, 4) <= sys.version_info < (3, 4, 1) ) makedirs = _makedirs_31 if needs_makedirs else os.makedirs diff --git a/thesisenv/lib/python3.6/site-packages/pip/_vendor/pyparsing.py b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pyparsing.py index ba2619c..865152d 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_vendor/pyparsing.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pyparsing.py @@ -1,6 +1,6 @@ # module pyparsing.py # -# Copyright (c) 2003-2016 Paul T. McGuire +# Copyright (c) 2003-2018 Paul T. McGuire # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the @@ -25,6 +25,7 @@ __doc__ = \ """ pyparsing module - Classes and methods to define and execute parsing grammars +============================================================================= The pyparsing module is an alternative approach to creating and executing simple grammars, vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you @@ -58,10 +59,23 @@ The pyparsing module handles some of the problems that are typically vexing when - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.) - quoted strings - embedded comments + + +Getting Started - +----------------- +Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing +classes inherit from. Use the docstrings for examples of how to: + - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes + - construct character word-group expressions using the L{Word} class + - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes + - use L{'+'}, L{'|'}, L{'^'}, and L{'&'} operators to combine simple expressions into more complex ones + - associate names with your parsed results using L{ParserElement.setResultsName} + - find some helpful expression short-cuts like L{delimitedList} and L{oneOf} + - find more useful common expressions in the L{pyparsing_common} namespace class """ -__version__ = "2.2.0" -__versionTime__ = "06 Mar 2017 02:06 UTC" +__version__ = "2.2.1" +__versionTime__ = "18 Sep 2018 00:49 UTC" __author__ = "Paul McGuire " import string @@ -82,6 +96,15 @@ try: except ImportError: from threading import RLock +try: + # Python 3 + from collections.abc import Iterable + from collections.abc import MutableMapping +except ImportError: + # Python 2.7 + from collections import Iterable + from collections import MutableMapping + try: from collections import OrderedDict as _OrderedDict except ImportError: @@ -940,7 +963,7 @@ class ParseResults(object): def __dir__(self): return (dir(type(self)) + list(self.keys())) -collections.MutableMapping.register(ParseResults) +MutableMapping.register(ParseResults) def col (loc,strg): """Returns current column within a string, counting newlines as line separators. 
@@ -1025,11 +1048,11 @@ def _trim_arity(func, maxargs=2): # special handling for Python 3.5.0 - extra deep call stack by 1 offset = -3 if system_version == (3,5,0) else -2 frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset] - return [(frame_summary.filename, frame_summary.lineno)] + return [frame_summary[:2]] def extract_tb(tb, limit=0): frames = traceback.extract_tb(tb, limit=limit) frame_summary = frames[-1] - return [(frame_summary.filename, frame_summary.lineno)] + return [frame_summary[:2]] else: extract_stack = traceback.extract_stack extract_tb = traceback.extract_tb @@ -1374,7 +1397,7 @@ class ParserElement(object): else: preloc = loc tokensStart = preloc - if self.mayIndexError or loc >= len(instring): + if self.mayIndexError or preloc >= len(instring): try: loc,tokens = self.parseImpl( instring, preloc, doActions ) except IndexError: @@ -1408,7 +1431,6 @@ class ParserElement(object): self.resultsName, asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), modal=self.modalResults ) - if debugging: #~ print ("Matched",self,"->",retTokens.asList()) if (self.debugActions[1] ): @@ -3242,7 +3264,7 @@ class ParseExpression(ParserElement): if isinstance( exprs, basestring ): self.exprs = [ ParserElement._literalStringClass( exprs ) ] - elif isinstance( exprs, collections.Iterable ): + elif isinstance( exprs, Iterable ): exprs = list(exprs) # if sequence of strings provided, wrap with Literal if all(isinstance(expr, basestring) for expr in exprs): @@ -4393,7 +4415,7 @@ def traceParseAction(f): @traceParseAction def remove_duplicate_chars(tokens): - return ''.join(sorted(set(''.join(tokens))) + return ''.join(sorted(set(''.join(tokens)))) wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) print(wds.parseString("slkdjs sld sldd sdlf sdljf")) @@ -4583,7 +4605,7 @@ def oneOf( strs, caseless=False, useRegex=True ): symbols = [] if isinstance(strs,basestring): symbols = strs.split() - elif isinstance(strs, collections.Iterable): + elif isinstance(strs, Iterable): symbols = list(strs) else: warnings.warn("Invalid argument to oneOf, expected string or iterable", @@ -4734,7 +4756,7 @@ stringEnd = StringEnd().setName("stringEnd") _escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1]) _escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16))) _escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8))) -_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1) | Regex(r"\w", re.UNICODE) +_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1) _charRange = Group(_singleChar + Suppress("-") + _singleChar) _reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]" diff --git a/thesisenv/lib/python3.6/site-packages/pip/_vendor/pytoml/parser.py b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pytoml/parser.py index e03a03f..9f94e92 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_vendor/pytoml/parser.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_vendor/pytoml/parser.py @@ -223,8 +223,8 @@ _float_re = re.compile(r'[+-]?(?:0|[1-9](?:_?\d)*)(?:\.\d(?:_?\d)*)?(?:[eE][+-]? 
_datetime_re = re.compile(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})(\.\d+)?(?:Z|([+-]\d{2}):(\d{2}))') _basicstr_ml_re = re.compile(r'(?:(?:|"|"")[^"\\\000-\011\013-\037])*') -_litstr_re = re.compile(r"[^'\000-\037]*") -_litstr_ml_re = re.compile(r"(?:(?:|'|'')(?:[^'\000-\011\013-\037]))*") +_litstr_re = re.compile(r"[^'\000\010\012-\037]*") +_litstr_ml_re = re.compile(r"(?:(?:|'|'')(?:[^'\000-\010\013-\037]))*") def _p_value(s, object_pairs_hook): pos = s.pos() diff --git a/thesisenv/lib/python3.6/site-packages/pip/_vendor/requests/__init__.py b/thesisenv/lib/python3.6/site-packages/pip/_vendor/requests/__init__.py index 9fb6633..3f3f4f2 100644 --- a/thesisenv/lib/python3.6/site-packages/pip/_vendor/requests/__init__.py +++ b/thesisenv/lib/python3.6/site-packages/pip/_vendor/requests/__init__.py @@ -91,7 +91,7 @@ except (AssertionError, ValueError): RequestsDependencyWarning) # Attempt to enable urllib3's SNI support, if possible -from pip._internal.compat import WINDOWS +from pip._internal.utils.compat import WINDOWS if not WINDOWS: try: from pip._vendor.urllib3.contrib import pyopenssl diff --git a/thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/INSTALLER b/thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/LICENSE.txt b/thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/LICENSE.txt new file mode 100644 index 0000000..1e65815 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/LICENSE.txt @@ -0,0 +1,54 @@ +Copyright 2017- Paul Ganssle +Copyright 2017- dateutil contributors (see AUTHORS file) + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +The above license applies to all contributions after 2017-12-01, as well as +all contributions that have been re-licensed (see AUTHORS file for the list of +contributors who have re-licensed their code). +-------------------------------------------------------------------------------- +dateutil - Extensions to the standard Python datetime module. + +Copyright (c) 2003-2011 - Gustavo Niemeyer +Copyright (c) 2012-2014 - Tomi Pieviläinen +Copyright (c) 2014-2016 - Yaron de Leeuw +Copyright (c) 2015- - Paul Ganssle +Copyright (c) 2015- - dateutil contributors (see AUTHORS file) + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ * Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The above BSD License Applies to all code, even that also covered by Apache 2.0. \ No newline at end of file diff --git a/thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/METADATA b/thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/METADATA new file mode 100644 index 0000000..935943e --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/METADATA @@ -0,0 +1,190 @@ +Metadata-Version: 2.1 +Name: python-dateutil +Version: 2.7.3 +Summary: Extensions to the standard Python datetime module +Home-page: https://dateutil.readthedocs.io +Author: Gustavo Niemeyer +Author-email: gustavo@niemeyer.net +Maintainer: Paul Ganssle +Maintainer-email: dateutil@python.org +License: Dual License +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: BSD License +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Topic :: Software Development :: Libraries +Requires: six +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.* +Description-Content-Type: text/x-rst +Requires-Dist: six (>=1.5) + +dateutil - powerful extensions to datetime +========================================== + +|pypi| |support| |licence| + +|gitter| |readthedocs| + +|travis| |appveyor| |coverage| + +.. |pypi| image:: https://img.shields.io/pypi/v/python-dateutil.svg?style=flat-square + :target: https://pypi.org/project/python-dateutil/ + :alt: pypi version + +.. |support| image:: https://img.shields.io/pypi/pyversions/python-dateutil.svg?style=flat-square + :target: https://pypi.org/project/python-dateutil/ + :alt: supported Python version + +.. |travis| image:: https://img.shields.io/travis/dateutil/dateutil/master.svg?style=flat-square&label=Travis%20Build + :target: https://travis-ci.org/dateutil/dateutil + :alt: travis build status + +.. |appveyor| image:: https://img.shields.io/appveyor/ci/dateutil/dateutil/master.svg?style=flat-square&logo=appveyor + :target: https://ci.appveyor.com/project/dateutil/dateutil + :alt: appveyor build status + +.. 
|coverage| image:: https://codecov.io/github/dateutil/dateutil/coverage.svg?branch=master + :target: https://codecov.io/github/dateutil/dateutil?branch=master + :alt: Code coverage + +.. |gitter| image:: https://badges.gitter.im/dateutil/dateutil.svg + :alt: Join the chat at https://gitter.im/dateutil/dateutil + :target: https://gitter.im/dateutil/dateutil + +.. |licence| image:: https://img.shields.io/pypi/l/python-dateutil.svg?style=flat-square + :target: https://pypi.org/project/python-dateutil/ + :alt: licence + +.. |readthedocs| image:: https://img.shields.io/readthedocs/dateutil/latest.svg?style=flat-square&label=Read%20the%20Docs + :alt: Read the documentation at https://dateutil.readthedocs.io/en/latest/ + :target: https://dateutil.readthedocs.io/en/latest/ + +The `dateutil` module provides powerful extensions to +the standard `datetime` module, available in Python. + + +Download +======== +dateutil is available on PyPI +https://pypi.org/project/python-dateutil/ + +The documentation is hosted at: +https://dateutil.readthedocs.io/en/stable/ + +Code +==== +The code and issue tracker are hosted on Github: +https://github.com/dateutil/dateutil/ + +Features +======== + +* Computing of relative deltas (next month, next year, + next monday, last week of month, etc); +* Computing of relative deltas between two given + date and/or datetime objects; +* Computing of dates based on very flexible recurrence rules, + using a superset of the `iCalendar `_ + specification. Parsing of RFC strings is supported as well. +* Generic parsing of dates in almost any string format; +* Timezone (tzinfo) implementations for tzfile(5) format + files (/etc/localtime, /usr/share/zoneinfo, etc), TZ + environment string (in all known formats), iCalendar + format files, given ranges (with help from relative deltas), + local machine timezone, fixed offset timezone, UTC timezone, + and Windows registry-based time zones. +* Internal up-to-date world timezone information based on + Olson's database. +* Computing of Easter Sunday dates for any given year, + using Western, Orthodox or Julian algorithms; +* A comprehensive test suite. + +Quick example +============= +Here's a snapshot, just to give an idea about the power of the +package. For more examples, look at the documentation. + +Suppose you want to know how much time is left, in +years/months/days/etc, before the next easter happening on a +year with a Friday 13th in August, and you want to get today's +date out of the "date" unix system command. Here is the code: + +.. 
code-block:: python3 + + >>> from dateutil.relativedelta import * + >>> from dateutil.easter import * + >>> from dateutil.rrule import * + >>> from dateutil.parser import * + >>> from datetime import * + >>> now = parse("Sat Oct 11 17:13:46 UTC 2003") + >>> today = now.date() + >>> year = rrule(YEARLY,dtstart=now,bymonth=8,bymonthday=13,byweekday=FR)[0].year + >>> rdelta = relativedelta(easter(year), today) + >>> print("Today is: %s" % today) + Today is: 2003-10-11 + >>> print("Year with next Aug 13th on a Friday is: %s" % year) + Year with next Aug 13th on a Friday is: 2004 + >>> print("How far is the Easter of that year: %s" % rdelta) + How far is the Easter of that year: relativedelta(months=+6) + >>> print("And the Easter of that year is: %s" % (today+rdelta)) + And the Easter of that year is: 2004-04-11 + +Being exactly 6 months ahead was **really** a coincidence :) + +Contributing +============ + +We welcome many types of contributions - bug reports, pull requests (code, infrastructure or documentation fixes). For more information about how to contribute to the project, see the ``CONTRIBUTING.md`` file in the repository. + + +Author +====== +The dateutil module was written by Gustavo Niemeyer +in 2003. + +It is maintained by: + +* Gustavo Niemeyer 2003-2011 +* Tomi Pieviläinen 2012-2014 +* Yaron de Leeuw 2014-2016 +* Paul Ganssle 2015- + +Starting with version 2.4.1, all source and binary distributions will be signed +by a PGP key that has, at the very least, been signed by the key which made the +previous release. A table of release signing keys can be found below: + +=========== ============================ +Releases Signing key fingerprint +=========== ============================ +2.4.1- `6B49 ACBA DCF6 BD1C A206 67AB CD54 FCE3 D964 BEFB`_ (|pgp_mirror|_) +=========== ============================ + + +Contact +======= +Our mailing list is available at `dateutil@python.org `_. As it is hosted by the PSF, it is subject to the `PSF code of +conduct `_. + +License +======= + +All contributions after December 1, 2017 released under dual license - either `Apache 2.0 License `_ or the `BSD 3-Clause License `_. Contributions before December 1, 2017 - except those explicitly relicensed - are released only under the BSD 3-Clause License. + + +.. _6B49 ACBA DCF6 BD1C A206 67AB CD54 FCE3 D964 BEFB: + https://pgp.mit.edu/pks/lookup?op=vindex&search=0xCD54FCE3D964BEFB + +.. |pgp_mirror| replace:: mirror +.. 
_pgp_mirror: https://sks-keyservers.net/pks/lookup?op=vindex&search=0xCD54FCE3D964BEFB + + diff --git a/thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/RECORD b/thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/RECORD new file mode 100644 index 0000000..9e8823a --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/RECORD @@ -0,0 +1,44 @@ +dateutil/__init__.py,sha256=lXElASqwYGwqlrSWSeX19JwF5Be9tNecDa9ebk-0gmk,222 +dateutil/__pycache__/__init__.cpython-36.pyc,, +dateutil/__pycache__/_common.cpython-36.pyc,, +dateutil/__pycache__/_version.cpython-36.pyc,, +dateutil/__pycache__/easter.cpython-36.pyc,, +dateutil/__pycache__/relativedelta.cpython-36.pyc,, +dateutil/__pycache__/rrule.cpython-36.pyc,, +dateutil/__pycache__/tzwin.cpython-36.pyc,, +dateutil/__pycache__/utils.cpython-36.pyc,, +dateutil/_common.py,sha256=77w0yytkrxlYbSn--lDVPUMabUXRR9I3lBv_vQRUqUY,932 +dateutil/_version.py,sha256=XL-cFqVZ8TgiXGz7zwkDsdx_2gsdUEQ56sbKFKDDt7o,116 +dateutil/easter.py,sha256=0liVsgqSx-NPhaFevOJaYgEbrSu2oQQ2o9m_OEBdc-s,2684 +dateutil/parser/__init__.py,sha256=efFZ9gA3I85X8IgQ_Rq6icz4rYdkUevfK8Oxekn3uYU,1727 +dateutil/parser/__pycache__/__init__.cpython-36.pyc,, +dateutil/parser/__pycache__/_parser.cpython-36.pyc,, +dateutil/parser/__pycache__/isoparser.cpython-36.pyc,, +dateutil/parser/_parser.py,sha256=nxl6E6a3C4EsyTL7Npp_to68JoX05BJl3-FUnRag_VM,57607 +dateutil/parser/isoparser.py,sha256=N--NyotwfEShxeOoW5CvpmLRNgJIKIHYv6XgsqlYqDY,12902 +dateutil/relativedelta.py,sha256=ZJj33WexdbkXbPqS4jvyRby1v3NeNOvAgw_YEofKxQM,24418 +dateutil/rrule.py,sha256=w9v8mTO_YF6OM4kgsN1KwukBqQRFGntDBPsSXrfNUww,64802 +dateutil/tz/__init__.py,sha256=xkeNSDRpsFSeqSaaGuLPmhyTfhjZ7W7BZnKcNfFKYUA,551 +dateutil/tz/__pycache__/__init__.cpython-36.pyc,, +dateutil/tz/__pycache__/_common.cpython-36.pyc,, +dateutil/tz/__pycache__/_factories.cpython-36.pyc,, +dateutil/tz/__pycache__/tz.cpython-36.pyc,, +dateutil/tz/__pycache__/win.cpython-36.pyc,, +dateutil/tz/_common.py,sha256=VAR-3Wd2f4FYZyKg09e6PFnflcGhNEABLYBv_wjS-T8,12892 +dateutil/tz/_factories.py,sha256=JUnlX9efOpuhTnp5fDSM_e62cwNy7W5kHcZpJHDhbFE,1434 +dateutil/tz/tz.py,sha256=2s3vRGHL1d48_TGr5kEwVxVMq4tJCHMMLg0SR91AZH4,60472 +dateutil/tz/win.py,sha256=M67X4b3x6RIveSR0Dl19B30Ow8No7Lkc-Hd4xdvNz8Q,11318 +dateutil/tzwin.py,sha256=7Ar4vdQCnnM0mKR3MUjbIKsZrBVfHgdwsJZc_mGYRew,59 +dateutil/utils.py,sha256=wG0nPh2fS274tUSsgywKAz3Ahq8m_-oKWwDtYeOPJuU,1963 +dateutil/zoneinfo/__init__.py,sha256=KYg0pthCMjcp5MXSEiBJn3nMjZeNZav7rlJw5-tz1S4,5889 +dateutil/zoneinfo/__pycache__/__init__.cpython-36.pyc,, +dateutil/zoneinfo/__pycache__/rebuild.cpython-36.pyc,, +dateutil/zoneinfo/dateutil-zoneinfo.tar.gz,sha256=-EsiuJpLHtWxVvWR5EgDPs-BbGpfSTkto8-NYuaBNrw,139130 +dateutil/zoneinfo/rebuild.py,sha256=2uFJQiW3Fl8fVogrSXisJMpLeHI1zGwpvBFF43QdeF0,1719 +python_dateutil-2.7.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +python_dateutil-2.7.3.dist-info/LICENSE.txt,sha256=ugD1Gg2SgjtaHN4n2LW50jIeZ-2NqbwWPv-W1eF-V34,2889 +python_dateutil-2.7.3.dist-info/METADATA,sha256=iKIXl1SQaeVSdwgdhxWQRblzZLyNiKXVwSNxb8LmFNI,7486 +python_dateutil-2.7.3.dist-info/RECORD,, +python_dateutil-2.7.3.dist-info/WHEEL,sha256=J3CsTk7Mf2JNUyhImI-mjX-fmI4oDjyiXgWT4qgZiCE,110 +python_dateutil-2.7.3.dist-info/top_level.txt,sha256=4tjdWkhRZvF7LA_BYe_L9gB2w_p2a-z5y6ArjaRkot8,9 +python_dateutil-2.7.3.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 diff --git 
a/thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/WHEEL b/thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/WHEEL new file mode 100644 index 0000000..f21b51c --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.31.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/top_level.txt b/thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/top_level.txt new file mode 100644 index 0000000..6650148 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/top_level.txt @@ -0,0 +1 @@ +dateutil diff --git a/thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/zip-safe b/thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/zip-safe new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/thesisenv/lib/python3.6/site-packages/python_dateutil-2.7.3.dist-info/zip-safe @@ -0,0 +1 @@ +
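Editor's note, not part of the patch: the build-hook wrapper methods added at the start of this hunk series each write their keyword arguments to input.json in a temporary directory, run the named backend hook in a subprocess, and return return_val from output.json, raising UnsupportedOperation when the backend reports the hook as unsupported. A minimal usage sketch follows; it assumes the enclosing class is pep517's Pep517HookCaller as vendored under pip._vendor.pep517.wrappers, and the project path and backend name are hypothetical.

    from pip._vendor.pep517.wrappers import Pep517HookCaller

    # Hypothetical source tree whose pyproject.toml names the build backend.
    hooks = Pep517HookCaller('/tmp/example-project', 'setuptools.build_meta')

    # Each call runs the correspondingly named hook in a subprocess.
    print(hooks.get_requires_for_build_sdist())   # dependency specs, e.g. []
    sdist_name = hooks.build_sdist('/tmp/example-project/dist')
    wheel_name = hooks.build_wheel('/tmp/example-project/dist')
    print(sdist_name, wheel_name)

As the docstrings above state, both build calls return only the name of the newly created file, relative to the output directory passed in.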