Development of an internal social media platform with personalised dashboards for students
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

__init__.py 8.2KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323
  1. """Python multiprocessing fork with improvements and bugfixes"""
  2. #
  3. # Package analogous to 'threading.py' but using processes
  4. #
  5. # multiprocessing/__init__.py
  6. #
  7. # This package is intended to duplicate the functionality (and much of
  8. # the API) of threading.py but uses processes instead of threads. A
  9. # subpackage 'multiprocessing.dummy' has the same API but is a simple
  10. # wrapper for 'threading'.
  11. #
  12. # Try calling `multiprocessing.doc.main()` to read the html
  13. # documentation in a webbrowser.
  14. #
  15. #
  16. # Copyright (c) 2006-2008, R Oudkerk
  17. # Licensed to PSF under a Contributor Agreement.
  18. #
  19. from __future__ import absolute_import
  20. import os
  21. import sys
  22. import warnings
  23. from .exceptions import ( # noqa
  24. ProcessError,
  25. BufferTooShort,
  26. TimeoutError,
  27. AuthenticationError,
  28. TimeLimitExceeded,
  29. SoftTimeLimitExceeded,
  30. WorkerLostError,
  31. )
  32. from .process import Process, current_process, active_children
  33. from .util import SUBDEBUG, SUBWARNING
# Release version as a tuple of (major, minor, micro, serial).
VERSION = (3, 3, 0, 23)
# Dotted version string built from the first four elements; any elements
# past the fourth (e.g. a pre-release tag string) would be appended
# verbatim by the second join.
__version__ = '.'.join(map(str, VERSION[0:4])) + "".join(VERSION[4:])
__author__ = 'R Oudkerk / Python Software Foundation'
__author_email__ = 'python-dev@python.org'
__maintainer__ = 'Ask Solem'
__contact__ = "ask@celeryproject.org"
__homepage__ = "http://github.com/celery/billiard"
__docformat__ = "restructuredtext"
# -eof meta-

# Names exported by ``from billiard import *``.
__all__ = [
    'Process', 'current_process', 'active_children', 'freeze_support',
    'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
    'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
    'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array',
    'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING', 'set_executable',
    'forking_enable', 'forking_is_enabled'
]
  52. def ensure_multiprocessing():
  53. from ._ext import ensure_multiprocessing
  54. return ensure_multiprocessing()
# Message for the RuntimeWarning emitted by ``forking_enable`` when exec
# support is requested but the billiard C extension is not installed.
W_NO_EXECV = """\
force_execv is not supported as the billiard C extension \
is not installed\
"""

#
# Definitions not depending on native semaphores
#
  62. def Manager():
  63. '''
  64. Returns a manager associated with a running server process
  65. The managers methods such as `Lock()`, `Condition()` and `Queue()`
  66. can be used to create shared objects.
  67. '''
  68. from .managers import SyncManager
  69. m = SyncManager()
  70. m.start()
  71. return m
  72. def Pipe(duplex=True, rnonblock=False, wnonblock=False):
  73. '''
  74. Returns two connection object connected by a pipe
  75. '''
  76. from billiard.connection import Pipe
  77. return Pipe(duplex, rnonblock, wnonblock)
  78. def cpu_count():
  79. '''
  80. Returns the number of CPUs in the system
  81. '''
  82. if sys.platform == 'win32':
  83. try:
  84. num = int(os.environ['NUMBER_OF_PROCESSORS'])
  85. except (ValueError, KeyError):
  86. num = 0
  87. elif 'bsd' in sys.platform or sys.platform == 'darwin':
  88. comm = '/sbin/sysctl -n hw.ncpu'
  89. if sys.platform == 'darwin':
  90. comm = '/usr' + comm
  91. try:
  92. with os.popen(comm) as p:
  93. num = int(p.read())
  94. except ValueError:
  95. num = 0
  96. else:
  97. try:
  98. num = os.sysconf('SC_NPROCESSORS_ONLN')
  99. except (ValueError, OSError, AttributeError):
  100. num = 0
  101. if num >= 1:
  102. return num
  103. else:
  104. raise NotImplementedError('cannot determine number of cpus')
  105. def freeze_support():
  106. '''
  107. Check whether this is a fake forked process in a frozen executable.
  108. If so then run code specified by commandline and exit.
  109. '''
  110. if sys.platform == 'win32' and getattr(sys, 'frozen', False):
  111. from .forking import freeze_support
  112. freeze_support()
  113. def get_logger():
  114. '''
  115. Return package logger -- if it does not already exist then it is created
  116. '''
  117. from .util import get_logger
  118. return get_logger()
  119. def log_to_stderr(level=None):
  120. '''
  121. Turn on logging and add a handler which prints to stderr
  122. '''
  123. from .util import log_to_stderr
  124. return log_to_stderr(level)
  125. def allow_connection_pickling():
  126. '''
  127. Install support for sending connections and sockets between processes
  128. '''
  129. from . import reduction # noqa
  130. #
  131. # Definitions depending on native semaphores
  132. #
  133. def Lock():
  134. '''
  135. Returns a non-recursive lock object
  136. '''
  137. from .synchronize import Lock
  138. return Lock()
  139. def RLock():
  140. '''
  141. Returns a recursive lock object
  142. '''
  143. from .synchronize import RLock
  144. return RLock()
  145. def Condition(lock=None):
  146. '''
  147. Returns a condition object
  148. '''
  149. from .synchronize import Condition
  150. return Condition(lock)
  151. def Semaphore(value=1):
  152. '''
  153. Returns a semaphore object
  154. '''
  155. from .synchronize import Semaphore
  156. return Semaphore(value)
  157. def BoundedSemaphore(value=1):
  158. '''
  159. Returns a bounded semaphore object
  160. '''
  161. from .synchronize import BoundedSemaphore
  162. return BoundedSemaphore(value)
  163. def Event():
  164. '''
  165. Returns an event object
  166. '''
  167. from .synchronize import Event
  168. return Event()
  169. def Queue(maxsize=0):
  170. '''
  171. Returns a queue object
  172. '''
  173. from .queues import Queue
  174. return Queue(maxsize)
  175. def JoinableQueue(maxsize=0):
  176. '''
  177. Returns a queue object
  178. '''
  179. from .queues import JoinableQueue
  180. return JoinableQueue(maxsize)
def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None,
         timeout=None, soft_timeout=None, lost_worker_timeout=None,
         max_restarts=None, max_restart_freq=1, on_process_up=None,
         on_process_down=None, on_timeout_set=None, on_timeout_cancel=None,
         threads=True, semaphore=None, putlocks=False, allow_restart=False):
    '''
    Returns a process pool object

    All arguments are forwarded *positionally* to the
    ``billiard.pool.Pool`` constructor, so the parameter order here
    must stay in sync with that signature.  See ``billiard.pool.Pool``
    for the meaning of each option.
    '''
    # Imported lazily, like the other factory helpers in this module.
    from .pool import Pool
    return Pool(processes, initializer, initargs, maxtasksperchild,
                timeout, soft_timeout, lost_worker_timeout,
                max_restarts, max_restart_freq, on_process_up,
                on_process_down, on_timeout_set, on_timeout_cancel,
                threads, semaphore, putlocks, allow_restart)
  195. def RawValue(typecode_or_type, *args):
  196. '''
  197. Returns a shared object
  198. '''
  199. from .sharedctypes import RawValue
  200. return RawValue(typecode_or_type, *args)
  201. def RawArray(typecode_or_type, size_or_initializer):
  202. '''
  203. Returns a shared array
  204. '''
  205. from .sharedctypes import RawArray
  206. return RawArray(typecode_or_type, size_or_initializer)
  207. def Value(typecode_or_type, *args, **kwds):
  208. '''
  209. Returns a synchronized shared object
  210. '''
  211. from .sharedctypes import Value
  212. return Value(typecode_or_type, *args, **kwds)
  213. def Array(typecode_or_type, size_or_initializer, **kwds):
  214. '''
  215. Returns a synchronized shared array
  216. '''
  217. from .sharedctypes import Array
  218. return Array(typecode_or_type, size_or_initializer, **kwds)
  219. #
  220. #
  221. #
  222. def set_executable(executable):
  223. '''
  224. Sets the path to a python.exe or pythonw.exe binary used to run
  225. child processes on Windows instead of sys.executable.
  226. Useful for people embedding Python.
  227. '''
  228. from .forking import set_executable
  229. set_executable(executable)
  230. def forking_is_enabled():
  231. '''
  232. Returns a boolean value indicating whether billiard is
  233. currently set to create child processes by forking the current
  234. python process rather than by starting a new instances of python.
  235. On Windows this always returns `False`. On Unix it returns `True` by
  236. default.
  237. '''
  238. from . import forking
  239. return forking._forking_is_enabled
def forking_enable(value):
    '''
    Enable/disable creation of child process by forking the current process.

    `value` should be a boolean value. If `value` is true then
    forking is enabled. If `value` is false then forking is disabled.
    On systems with `os.fork()` forking is enabled by default, and on
    other systems it is always disabled.
    '''
    # Only disabling requires the C extension's exec support; enabling
    # (the default state) is a no-op here.
    if not value:
        from ._ext import supports_exec
        if supports_exec:
            from . import forking
            # NOTE(review): this branch is unreachable -- we only get
            # here when ``value`` is falsy, so ``value and ...`` is
            # always False.  Presumably the check was meant to guard
            # *enabling* forking on platforms without os.fork();
            # confirm intent before fixing, as hoisting it would make
            # forking_enable(True) raise on Windows.
            if value and not hasattr(os, 'fork'):
                raise ValueError('os.fork() not found')
            forking._forking_is_enabled = bool(value)
            if not value:
                # Propagate the setting to child processes via the
                # environment (read back at import time below).
                os.environ["MULTIPROCESSING_FORKING_DISABLE"] = "1"
        else:
            # Cannot honour the request without the C extension.
            warnings.warn(RuntimeWarning(W_NO_EXECV))
# Honour MULTIPROCESSING_FORKING_DISABLE at import time: any non-empty
# value disables forking (set for children by forking_enable(False)).
if os.environ.get("MULTIPROCESSING_FORKING_DISABLE"):
    forking_enable(False)