Funktionierender Prototyp des Serious Games zur Vermittlung von Wissen zu Software-Engineering-Arbeitsmodellen.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

autoreload.py 24KB

1 year ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685
  1. import functools
  2. import itertools
  3. import logging
  4. import os
  5. import signal
  6. import subprocess
  7. import sys
  8. import threading
  9. import time
  10. import traceback
  11. import weakref
  12. from collections import defaultdict
  13. from pathlib import Path
  14. from types import ModuleType
  15. from zipimport import zipimporter
  16. import django
  17. from django.apps import apps
  18. from django.core.signals import request_finished
  19. from django.dispatch import Signal
  20. from django.utils.functional import cached_property
  21. from django.utils.version import get_version_tuple
# Signal sent once the reloader has started watching files.
autoreload_started = Signal()
# Signal sent when a watched file changes; receivers returning a truthy value
# suppress the reload (see BaseReloader.notify_file_changed).
file_changed = Signal()

# Environment variable set to "true" in the child process spawned by
# restart_with_reloader(), so run_with_reloader() can tell parent from child.
DJANGO_AUTORELOAD_ENV = "RUN_MAIN"

logger = logging.getLogger("django.utils.autoreload")

# If an error is raised while importing a file, it's not placed in sys.modules.
# This means that any future modifications aren't caught. Keep a list of these
# file paths to allow watching them in the future.
_error_files = []
# Holds the sys.exc_info() triple of the last exception captured by
# check_errors(); re-raised by raise_last_exception().
_exception = None
# termios is Unix-only; used by ensure_echo_on() to restore terminal echo.
try:
    import termios
except ImportError:
    termios = None

# pywatchman is an optional dependency backing WatchmanReloader.
try:
    import pywatchman
except ImportError:
    pywatchman = None
  39. def is_django_module(module):
  40. """Return True if the given module is nested under Django."""
  41. return module.__name__.startswith("django.")
  42. def is_django_path(path):
  43. """Return True if the given file path is nested under Django."""
  44. return Path(django.__file__).parent in Path(path).parents
  45. def check_errors(fn):
  46. @functools.wraps(fn)
  47. def wrapper(*args, **kwargs):
  48. global _exception
  49. try:
  50. fn(*args, **kwargs)
  51. except Exception:
  52. _exception = sys.exc_info()
  53. et, ev, tb = _exception
  54. if getattr(ev, "filename", None) is None:
  55. # get the filename from the last item in the stack
  56. filename = traceback.extract_tb(tb)[-1][0]
  57. else:
  58. filename = ev.filename
  59. if filename not in _error_files:
  60. _error_files.append(filename)
  61. raise
  62. return wrapper
  63. def raise_last_exception():
  64. global _exception
  65. if _exception is not None:
  66. raise _exception[1]
def ensure_echo_on():
    """
    Ensure that echo mode is enabled. Some tools such as PDB disable
    it which causes usability issues after reload.
    """
    # No-op on non-Unix platforms (termios is None) or when stdin isn't a tty.
    if not termios or not sys.stdin.isatty():
        return
    attr_list = termios.tcgetattr(sys.stdin)
    # attr_list[3] holds the local-mode flags; check/restore the ECHO bit.
    if not attr_list[3] & termios.ECHO:
        attr_list[3] |= termios.ECHO
        if hasattr(signal, "SIGTTOU"):
            # Temporarily ignore SIGTTOU so a background process can change
            # terminal attributes without being stopped.
            old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
        else:
            old_handler = None
        termios.tcsetattr(sys.stdin, termios.TCSANOW, attr_list)
        if old_handler is not None:
            # Restore the previous SIGTTOU handler.
            signal.signal(signal.SIGTTOU, old_handler)
  84. def iter_all_python_module_files():
  85. # This is a hot path during reloading. Create a stable sorted list of
  86. # modules based on the module name and pass it to iter_modules_and_files().
  87. # This ensures cached results are returned in the usual case that modules
  88. # aren't loaded on the fly.
  89. keys = sorted(sys.modules)
  90. modules = tuple(
  91. m
  92. for m in map(sys.modules.__getitem__, keys)
  93. if not isinstance(m, weakref.ProxyTypes)
  94. )
  95. return iter_modules_and_files(modules, frozenset(_error_files))
  96. @functools.lru_cache(maxsize=1)
  97. def iter_modules_and_files(modules, extra_files):
  98. """Iterate through all modules needed to be watched."""
  99. sys_file_paths = []
  100. for module in modules:
  101. # During debugging (with PyDev) the 'typing.io' and 'typing.re' objects
  102. # are added to sys.modules, however they are types not modules and so
  103. # cause issues here.
  104. if not isinstance(module, ModuleType):
  105. continue
  106. if module.__name__ in ("__main__", "__mp_main__"):
  107. # __main__ (usually manage.py) doesn't always have a __spec__ set.
  108. # Handle this by falling back to using __file__, resolved below.
  109. # See https://docs.python.org/reference/import.html#main-spec
  110. # __file__ may not exists, e.g. when running ipdb debugger.
  111. if hasattr(module, "__file__"):
  112. sys_file_paths.append(module.__file__)
  113. continue
  114. if getattr(module, "__spec__", None) is None:
  115. continue
  116. spec = module.__spec__
  117. # Modules could be loaded from places without a concrete location. If
  118. # this is the case, skip them.
  119. if spec.has_location:
  120. origin = (
  121. spec.loader.archive
  122. if isinstance(spec.loader, zipimporter)
  123. else spec.origin
  124. )
  125. sys_file_paths.append(origin)
  126. results = set()
  127. for filename in itertools.chain(sys_file_paths, extra_files):
  128. if not filename:
  129. continue
  130. path = Path(filename)
  131. try:
  132. if not path.exists():
  133. # The module could have been removed, don't fail loudly if this
  134. # is the case.
  135. continue
  136. except ValueError as e:
  137. # Network filesystems may return null bytes in file paths.
  138. logger.debug('"%s" raised when resolving path: "%s"', e, path)
  139. continue
  140. resolved_path = path.resolve().absolute()
  141. results.add(resolved_path)
  142. return frozenset(results)
  143. @functools.lru_cache(maxsize=1)
  144. def common_roots(paths):
  145. """
  146. Return a tuple of common roots that are shared between the given paths.
  147. File system watchers operate on directories and aren't cheap to create.
  148. Try to find the minimum set of directories to watch that encompass all of
  149. the files that need to be watched.
  150. """
  151. # Inspired from Werkzeug:
  152. # https://github.com/pallets/werkzeug/blob/7477be2853df70a022d9613e765581b9411c3c39/werkzeug/_reloader.py
  153. # Create a sorted list of the path components, longest first.
  154. path_parts = sorted([x.parts for x in paths], key=len, reverse=True)
  155. tree = {}
  156. for chunks in path_parts:
  157. node = tree
  158. # Add each part of the path to the tree.
  159. for chunk in chunks:
  160. node = node.setdefault(chunk, {})
  161. # Clear the last leaf in the tree.
  162. node.clear()
  163. # Turn the tree into a list of Path instances.
  164. def _walk(node, path):
  165. for prefix, child in node.items():
  166. yield from _walk(child, path + (prefix,))
  167. if not node:
  168. yield Path(*path)
  169. return tuple(_walk(tree, ()))
  170. def sys_path_directories():
  171. """
  172. Yield absolute directories from sys.path, ignoring entries that don't
  173. exist.
  174. """
  175. for path in sys.path:
  176. path = Path(path)
  177. if not path.exists():
  178. continue
  179. resolved_path = path.resolve().absolute()
  180. # If the path is a file (like a zip file), watch the parent directory.
  181. if resolved_path.is_file():
  182. yield resolved_path.parent
  183. else:
  184. yield resolved_path
  185. def get_child_arguments():
  186. """
  187. Return the executable. This contains a workaround for Windows if the
  188. executable is reported to not have the .exe extension which can cause bugs
  189. on reloading.
  190. """
  191. import __main__
  192. py_script = Path(sys.argv[0])
  193. args = [sys.executable] + ["-W%s" % o for o in sys.warnoptions]
  194. if sys.implementation.name == "cpython":
  195. args.extend(
  196. f"-X{key}" if value is True else f"-X{key}={value}"
  197. for key, value in sys._xoptions.items()
  198. )
  199. # __spec__ is set when the server was started with the `-m` option,
  200. # see https://docs.python.org/3/reference/import.html#main-spec
  201. # __spec__ may not exist, e.g. when running in a Conda env.
  202. if getattr(__main__, "__spec__", None) is not None:
  203. spec = __main__.__spec__
  204. if (spec.name == "__main__" or spec.name.endswith(".__main__")) and spec.parent:
  205. name = spec.parent
  206. else:
  207. name = spec.name
  208. args += ["-m", name]
  209. args += sys.argv[1:]
  210. elif not py_script.exists():
  211. # sys.argv[0] may not exist for several reasons on Windows.
  212. # It may exist with a .exe extension or have a -script.py suffix.
  213. exe_entrypoint = py_script.with_suffix(".exe")
  214. if exe_entrypoint.exists():
  215. # Should be executed directly, ignoring sys.executable.
  216. return [exe_entrypoint, *sys.argv[1:]]
  217. script_entrypoint = py_script.with_name("%s-script.py" % py_script.name)
  218. if script_entrypoint.exists():
  219. # Should be executed as usual.
  220. return [*args, script_entrypoint, *sys.argv[1:]]
  221. raise RuntimeError("Script %s does not exist." % py_script)
  222. else:
  223. args += sys.argv
  224. return args
  225. def trigger_reload(filename):
  226. logger.info("%s changed, reloading.", filename)
  227. sys.exit(3)
  228. def restart_with_reloader():
  229. new_environ = {**os.environ, DJANGO_AUTORELOAD_ENV: "true"}
  230. args = get_child_arguments()
  231. while True:
  232. p = subprocess.run(args, env=new_environ, close_fds=False)
  233. if p.returncode != 3:
  234. return p.returncode
  235. class BaseReloader:
  236. def __init__(self):
  237. self.extra_files = set()
  238. self.directory_globs = defaultdict(set)
  239. self._stop_condition = threading.Event()
  240. def watch_dir(self, path, glob):
  241. path = Path(path)
  242. try:
  243. path = path.absolute()
  244. except FileNotFoundError:
  245. logger.debug(
  246. "Unable to watch directory %s as it cannot be resolved.",
  247. path,
  248. exc_info=True,
  249. )
  250. return
  251. logger.debug("Watching dir %s with glob %s.", path, glob)
  252. self.directory_globs[path].add(glob)
  253. def watched_files(self, include_globs=True):
  254. """
  255. Yield all files that need to be watched, including module files and
  256. files within globs.
  257. """
  258. yield from iter_all_python_module_files()
  259. yield from self.extra_files
  260. if include_globs:
  261. for directory, patterns in self.directory_globs.items():
  262. for pattern in patterns:
  263. yield from directory.glob(pattern)
  264. def wait_for_apps_ready(self, app_reg, django_main_thread):
  265. """
  266. Wait until Django reports that the apps have been loaded. If the given
  267. thread has terminated before the apps are ready, then a SyntaxError or
  268. other non-recoverable error has been raised. In that case, stop waiting
  269. for the apps_ready event and continue processing.
  270. Return True if the thread is alive and the ready event has been
  271. triggered, or False if the thread is terminated while waiting for the
  272. event.
  273. """
  274. while django_main_thread.is_alive():
  275. if app_reg.ready_event.wait(timeout=0.1):
  276. return True
  277. else:
  278. logger.debug("Main Django thread has terminated before apps are ready.")
  279. return False
  280. def run(self, django_main_thread):
  281. logger.debug("Waiting for apps ready_event.")
  282. self.wait_for_apps_ready(apps, django_main_thread)
  283. from django.urls import get_resolver
  284. # Prevent a race condition where URL modules aren't loaded when the
  285. # reloader starts by accessing the urlconf_module property.
  286. try:
  287. get_resolver().urlconf_module
  288. except Exception:
  289. # Loading the urlconf can result in errors during development.
  290. # If this occurs then swallow the error and continue.
  291. pass
  292. logger.debug("Apps ready_event triggered. Sending autoreload_started signal.")
  293. autoreload_started.send(sender=self)
  294. self.run_loop()
  295. def run_loop(self):
  296. ticker = self.tick()
  297. while not self.should_stop:
  298. try:
  299. next(ticker)
  300. except StopIteration:
  301. break
  302. self.stop()
  303. def tick(self):
  304. """
  305. This generator is called in a loop from run_loop. It's important that
  306. the method takes care of pausing or otherwise waiting for a period of
  307. time. This split between run_loop() and tick() is to improve the
  308. testability of the reloader implementations by decoupling the work they
  309. do from the loop.
  310. """
  311. raise NotImplementedError("subclasses must implement tick().")
  312. @classmethod
  313. def check_availability(cls):
  314. raise NotImplementedError("subclasses must implement check_availability().")
  315. def notify_file_changed(self, path):
  316. results = file_changed.send(sender=self, file_path=path)
  317. logger.debug("%s notified as changed. Signal results: %s.", path, results)
  318. if not any(res[1] for res in results):
  319. trigger_reload(path)
  320. # These are primarily used for testing.
  321. @property
  322. def should_stop(self):
  323. return self._stop_condition.is_set()
  324. def stop(self):
  325. self._stop_condition.set()
  326. class StatReloader(BaseReloader):
  327. SLEEP_TIME = 1 # Check for changes once per second.
  328. def tick(self):
  329. mtimes = {}
  330. while True:
  331. for filepath, mtime in self.snapshot_files():
  332. old_time = mtimes.get(filepath)
  333. mtimes[filepath] = mtime
  334. if old_time is None:
  335. logger.debug("File %s first seen with mtime %s", filepath, mtime)
  336. continue
  337. elif mtime > old_time:
  338. logger.debug(
  339. "File %s previous mtime: %s, current mtime: %s",
  340. filepath,
  341. old_time,
  342. mtime,
  343. )
  344. self.notify_file_changed(filepath)
  345. time.sleep(self.SLEEP_TIME)
  346. yield
  347. def snapshot_files(self):
  348. # watched_files may produce duplicate paths if globs overlap.
  349. seen_files = set()
  350. for file in self.watched_files():
  351. if file in seen_files:
  352. continue
  353. try:
  354. mtime = file.stat().st_mtime
  355. except OSError:
  356. # This is thrown when the file does not exist.
  357. continue
  358. seen_files.add(file)
  359. yield file, mtime
  360. @classmethod
  361. def check_availability(cls):
  362. return True
  363. class WatchmanUnavailable(RuntimeError):
  364. pass
class WatchmanReloader(BaseReloader):
    """Reloader backed by the Watchman file-watching service (pywatchman)."""

    def __init__(self):
        # NOTE(review): self.roots appears unused within this module —
        # confirm external/test use before removing.
        self.roots = defaultdict(set)
        # Set by request_processed(); prompts tick() to refresh subscriptions.
        self.processed_request = threading.Event()
        self.client_timeout = int(os.environ.get("DJANGO_WATCHMAN_TIMEOUT", 5))
        super().__init__()

    @cached_property
    def client(self):
        # Created lazily so instantiating the reloader never blocks on the
        # watchman socket.
        return pywatchman.client(timeout=self.client_timeout)

    def _watch_root(self, root):
        # In practice this shouldn't occur, however, it's possible that a
        # directory that doesn't exist yet is being watched. If it's outside of
        # sys.path then this will end up a new root. How to handle this isn't
        # clear: Not adding the root will likely break when subscribing to the
        # changes, however, as this is currently an internal API, no files
        # will be being watched outside of sys.path. Fixing this by checking
        # inside watch_glob() and watch_dir() is expensive, instead this could
        # could fall back to the StatReloader if this case is detected? For
        # now, watching its parent, if possible, is sufficient.
        if not root.exists():
            if not root.parent.exists():
                logger.warning(
                    "Unable to watch root dir %s as neither it or its parent exist.",
                    root,
                )
                return
            root = root.parent
        result = self.client.query("watch-project", str(root.absolute()))
        if "warning" in result:
            logger.warning("Watchman warning: %s", result["warning"])
        logger.debug("Watchman watch-project result: %s", result)
        return result["watch"], result.get("relative_path")

    @functools.lru_cache
    # NOTE(review): lru_cache on a method keys on self and keeps the instance
    # alive for the cache's lifetime; presumably acceptable because the
    # reloader lives for the whole process — confirm.
    def _get_clock(self, root):
        return self.client.query("clock", root)["clock"]

    def _subscribe(self, directory, name, expression):
        root, rel_path = self._watch_root(directory)
        # Only receive notifications of files changing, filtering out other types
        # like special files: https://facebook.github.io/watchman/docs/type
        only_files_expression = [
            "allof",
            ["anyof", ["type", "f"], ["type", "l"]],
            expression,
        ]
        query = {
            "expression": only_files_expression,
            "fields": ["name"],
            # "since" the recorded clock, so only future changes are reported.
            "since": self._get_clock(root),
            "dedup_results": True,
        }
        if rel_path:
            query["relative_root"] = rel_path
        logger.debug(
            "Issuing watchman subscription %s, for root %s. Query: %s",
            name,
            root,
            query,
        )
        self.client.query("subscribe", root, name, query)

    def _subscribe_dir(self, directory, filenames):
        if not directory.exists():
            if not directory.parent.exists():
                logger.warning(
                    "Unable to watch directory %s as neither it or its parent exist.",
                    directory,
                )
                return
            # Watch the parent directory instead, rewriting each filename to
            # be relative to it.
            prefix = "files-parent-%s" % directory.name
            filenames = ["%s/%s" % (directory.name, filename) for filename in filenames]
            directory = directory.parent
            expression = ["name", filenames, "wholename"]
        else:
            prefix = "files"
            expression = ["name", filenames]
        self._subscribe(directory, "%s:%s" % (prefix, directory), expression)

    def _watch_glob(self, directory, patterns):
        """
        Watch a directory with a specific glob. If the directory doesn't yet
        exist, attempt to watch the parent directory and amend the patterns to
        include this. It's important this method isn't called more than one per
        directory when updating all subscriptions. Subsequent calls will
        overwrite the named subscription, so it must include all possible glob
        expressions.
        """
        prefix = "glob"
        if not directory.exists():
            if not directory.parent.exists():
                logger.warning(
                    "Unable to watch directory %s as neither it or its parent exist.",
                    directory,
                )
                return
            prefix = "glob-parent-%s" % directory.name
            patterns = ["%s/%s" % (directory.name, pattern) for pattern in patterns]
            directory = directory.parent

        expression = ["anyof"]
        for pattern in patterns:
            expression.append(["match", pattern, "wholename"])
        self._subscribe(directory, "%s:%s" % (prefix, directory), expression)

    def watched_roots(self, watched_files):
        # Union of glob directories, parents of watched files, and sys.path
        # directories: everything a subscription might need to cover.
        extra_directories = self.directory_globs.keys()
        watched_file_dirs = [f.parent for f in watched_files]
        sys_paths = list(sys_path_directories())
        return frozenset((*extra_directories, *watched_file_dirs, *sys_paths))

    def _update_watches(self):
        watched_files = list(self.watched_files(include_globs=False))
        found_roots = common_roots(self.watched_roots(watched_files))
        logger.debug("Watching %s files", len(watched_files))
        logger.debug("Found common roots: %s", found_roots)
        # Setup initial roots for performance, shortest roots first.
        for root in sorted(found_roots):
            self._watch_root(root)
        for directory, patterns in self.directory_globs.items():
            self._watch_glob(directory, patterns)
        # Group sorted watched_files by their parent directory.
        sorted_files = sorted(watched_files, key=lambda p: p.parent)
        for directory, group in itertools.groupby(sorted_files, key=lambda p: p.parent):
            # These paths need to be relative to the parent directory.
            self._subscribe_dir(
                directory, [str(p.relative_to(directory)) for p in group]
            )

    def update_watches(self):
        """Refresh all subscriptions, translating server loss into
        WatchmanUnavailable via check_server_status()."""
        try:
            self._update_watches()
        except Exception as ex:
            # If the service is still available, raise the original exception.
            if self.check_server_status(ex):
                raise

    def _check_subscription(self, sub):
        subscription = self.client.getSubscription(sub)
        if not subscription:
            return
        logger.debug("Watchman subscription %s has results.", sub)
        for result in subscription:
            # When using watch-project, it's not simple to get the relative
            # directory without storing some specific state. Store the full
            # path to the directory in the subscription name, prefixed by its
            # type (glob, files).
            root_directory = Path(result["subscription"].split(":", 1)[1])
            logger.debug("Found root directory %s", root_directory)
            for file in result.get("files", []):
                self.notify_file_changed(root_directory / file)

    def request_processed(self, **kwargs):
        # Connected to Django's request_finished signal in tick().
        logger.debug("Request processed. Setting update_watches event.")
        self.processed_request.set()

    def tick(self):
        request_finished.connect(self.request_processed)
        self.update_watches()
        while True:
            if self.processed_request.is_set():
                # A request finished since the last tick; new modules may have
                # been imported, so refresh the subscriptions.
                self.update_watches()
                self.processed_request.clear()
            try:
                self.client.receive()
            except pywatchman.SocketTimeout:
                # No events within the timeout window; loop and try again.
                pass
            except pywatchman.WatchmanError as ex:
                logger.debug("Watchman error: %s, checking server status.", ex)
                self.check_server_status(ex)
            else:
                for sub in list(self.client.subs.keys()):
                    self._check_subscription(sub)
            yield
            # Protect against busy loops.
            time.sleep(0.1)

    def stop(self):
        self.client.close()
        super().stop()

    def check_server_status(self, inner_ex=None):
        """Return True if the server is available."""
        try:
            self.client.query("version")
        except Exception:
            raise WatchmanUnavailable(str(inner_ex)) from inner_ex
        return True

    @classmethod
    def check_availability(cls):
        # Raises WatchmanUnavailable unless a usable watchman >= 4.9 responds.
        if not pywatchman:
            raise WatchmanUnavailable("pywatchman not installed.")
        client = pywatchman.client(timeout=0.1)
        try:
            result = client.capabilityCheck()
        except Exception:
            # The service is down?
            raise WatchmanUnavailable("Cannot connect to the watchman service.")
        version = get_version_tuple(result["version"])
        # Watchman 4.9 includes multiple improvements to watching project
        # directories as well as case insensitive filesystems.
        logger.debug("Watchman version %s", version)
        if version < (4, 9):
            raise WatchmanUnavailable("Watchman 4.9 or later is required.")
  556. def get_reloader():
  557. """Return the most suitable reloader for this environment."""
  558. try:
  559. WatchmanReloader.check_availability()
  560. except WatchmanUnavailable:
  561. return StatReloader()
  562. return WatchmanReloader()
  563. def start_django(reloader, main_func, *args, **kwargs):
  564. ensure_echo_on()
  565. main_func = check_errors(main_func)
  566. django_main_thread = threading.Thread(
  567. target=main_func, args=args, kwargs=kwargs, name="django-main-thread"
  568. )
  569. django_main_thread.daemon = True
  570. django_main_thread.start()
  571. while not reloader.should_stop:
  572. try:
  573. reloader.run(django_main_thread)
  574. except WatchmanUnavailable as ex:
  575. # It's possible that the watchman service shuts down or otherwise
  576. # becomes unavailable. In that case, use the StatReloader.
  577. reloader = StatReloader()
  578. logger.error("Error connecting to Watchman: %s", ex)
  579. logger.info(
  580. "Watching for file changes with %s", reloader.__class__.__name__
  581. )
  582. def run_with_reloader(main_func, *args, **kwargs):
  583. signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
  584. try:
  585. if os.environ.get(DJANGO_AUTORELOAD_ENV) == "true":
  586. reloader = get_reloader()
  587. logger.info(
  588. "Watching for file changes with %s", reloader.__class__.__name__
  589. )
  590. start_django(reloader, main_func, *args, **kwargs)
  591. else:
  592. exit_code = restart_with_reloader()
  593. sys.exit(exit_code)
  594. except KeyboardInterrupt:
  595. pass