Working prototype of the serious game for teaching knowledge about software engineering work models.

template.py 10KB

import warnings
from io import StringIO

from django.template.base import Lexer, TokenType
from django.utils.regex_helper import _lazy_re_compile

from . import TranslatorCommentWarning, trim_whitespace

TRANSLATOR_COMMENT_MARK = "Translators"

dot_re = _lazy_re_compile(r"\S")


def blankout(src, char):
    """
    Change every non-whitespace character to the given char.
    Used in the templatize function.
    """
    return dot_re.sub(char, src)


context_re = _lazy_re_compile(r"""^\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?'))\s*""")
inline_re = _lazy_re_compile(
    # Match the trans/translate 'some text' part.
    r"""^\s*trans(?:late)?\s+((?:"[^"]*?")|(?:'[^']*?'))"""
    # Match and ignore optional filters
    r"""(?:\s*\|\s*[^\s:]+(?::(?:[^\s'":]+|(?:"[^"]*?")|(?:'[^']*?')))?)*"""
    # Match the optional context part
    r"""(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?\s*"""
)
block_re = _lazy_re_compile(
    r"""^\s*blocktrans(?:late)?(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?(?:\s+|$)"""
)
endblock_re = _lazy_re_compile(r"""^\s*endblocktrans(?:late)?$""")
plural_re = _lazy_re_compile(r"""^\s*plural$""")
constant_re = _lazy_re_compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")


def templatize(src, origin=None):
    """
    Turn a Django template into something that is understood by xgettext. It
    does so by translating the Django translation tags into standard gettext
    function invocations.
    """
    out = StringIO("")
    message_context = None
    intrans = False
    inplural = False
    trimmed = False
    singular = []
    plural = []
    incomment = False
    comment = []
    lineno_comment_map = {}
    comment_lineno_cache = None
    # Adding the u prefix allows gettext to recognize the string (#26093).
    raw_prefix = "u"

    def join_tokens(tokens, trim=False):
        message = "".join(tokens)
        if trim:
            message = trim_whitespace(message)
        return message

    for t in Lexer(src).tokenize():
        if incomment:
            if t.token_type == TokenType.BLOCK and t.contents == "endcomment":
                content = "".join(comment)
                translators_comment_start = None
                for lineno, line in enumerate(content.splitlines(True)):
                    if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
                        translators_comment_start = lineno
                for lineno, line in enumerate(content.splitlines(True)):
                    if (
                        translators_comment_start is not None
                        and lineno >= translators_comment_start
                    ):
                        out.write(" # %s" % line)
                    else:
                        out.write(" #\n")
                incomment = False
                comment = []
            else:
                comment.append(t.contents)
        elif intrans:
            if t.token_type == TokenType.BLOCK:
                endbmatch = endblock_re.match(t.contents)
                pluralmatch = plural_re.match(t.contents)
                if endbmatch:
                    if inplural:
                        if message_context:
                            out.write(
                                " npgettext({p}{!r}, {p}{!r}, {p}{!r},count) ".format(
                                    message_context,
                                    join_tokens(singular, trimmed),
                                    join_tokens(plural, trimmed),
                                    p=raw_prefix,
                                )
                            )
                        else:
                            out.write(
                                " ngettext({p}{!r}, {p}{!r}, count) ".format(
                                    join_tokens(singular, trimmed),
                                    join_tokens(plural, trimmed),
                                    p=raw_prefix,
                                )
                            )
                        for part in singular:
                            out.write(blankout(part, "S"))
                        for part in plural:
                            out.write(blankout(part, "P"))
                    else:
                        if message_context:
                            out.write(
                                " pgettext({p}{!r}, {p}{!r}) ".format(
                                    message_context,
                                    join_tokens(singular, trimmed),
                                    p=raw_prefix,
                                )
                            )
                        else:
                            out.write(
                                " gettext({p}{!r}) ".format(
                                    join_tokens(singular, trimmed),
                                    p=raw_prefix,
                                )
                            )
                        for part in singular:
                            out.write(blankout(part, "S"))
                    message_context = None
                    intrans = False
                    inplural = False
                    singular = []
                    plural = []
                elif pluralmatch:
                    inplural = True
                else:
                    filemsg = ""
                    if origin:
                        filemsg = "file %s, " % origin
                    raise SyntaxError(
                        "Translation blocks must not include other block tags: "
                        "%s (%sline %d)" % (t.contents, filemsg, t.lineno)
                    )
            elif t.token_type == TokenType.VAR:
                if inplural:
                    plural.append("%%(%s)s" % t.contents)
                else:
                    singular.append("%%(%s)s" % t.contents)
            elif t.token_type == TokenType.TEXT:
                contents = t.contents.replace("%", "%%")
                if inplural:
                    plural.append(contents)
                else:
                    singular.append(contents)
        else:
            # Handle comment tokens (`{# ... #}`) plus other constructs on
            # the same line:
            if comment_lineno_cache is not None:
                cur_lineno = t.lineno + t.contents.count("\n")
                if comment_lineno_cache == cur_lineno:
                    if t.token_type != TokenType.COMMENT:
                        for c in lineno_comment_map[comment_lineno_cache]:
                            filemsg = ""
                            if origin:
                                filemsg = "file %s, " % origin
                            warn_msg = (
                                "The translator-targeted comment '%s' "
                                "(%sline %d) was ignored, because it wasn't "
                                "the last item on the line."
                            ) % (c, filemsg, comment_lineno_cache)
                            warnings.warn(warn_msg, TranslatorCommentWarning)
                        lineno_comment_map[comment_lineno_cache] = []
                else:
                    out.write(
                        "# %s" % " | ".join(lineno_comment_map[comment_lineno_cache])
                    )
                comment_lineno_cache = None

            if t.token_type == TokenType.BLOCK:
                imatch = inline_re.match(t.contents)
                bmatch = block_re.match(t.contents)
                cmatches = constant_re.findall(t.contents)
                if imatch:
                    g = imatch[1]
                    if g[0] == '"':
                        g = g.strip('"')
                    elif g[0] == "'":
                        g = g.strip("'")
                    g = g.replace("%", "%%")
                    if imatch[2]:
                        # A context is provided
                        context_match = context_re.match(imatch[2])
                        message_context = context_match[1]
                        if message_context[0] == '"':
                            message_context = message_context.strip('"')
                        elif message_context[0] == "'":
                            message_context = message_context.strip("'")
                        out.write(
                            " pgettext({p}{!r}, {p}{!r}) ".format(
                                message_context, g, p=raw_prefix
                            )
                        )
                        message_context = None
                    else:
                        out.write(" gettext({p}{!r}) ".format(g, p=raw_prefix))
                elif bmatch:
                    for fmatch in constant_re.findall(t.contents):
                        out.write(" _(%s) " % fmatch)
                    if bmatch[1]:
                        # A context is provided
                        context_match = context_re.match(bmatch[1])
                        message_context = context_match[1]
                        if message_context[0] == '"':
                            message_context = message_context.strip('"')
                        elif message_context[0] == "'":
                            message_context = message_context.strip("'")
                    intrans = True
                    inplural = False
                    trimmed = "trimmed" in t.split_contents()
                    singular = []
                    plural = []
                elif cmatches:
                    for cmatch in cmatches:
                        out.write(" _(%s) " % cmatch)
                elif t.contents == "comment":
                    incomment = True
                else:
                    out.write(blankout(t.contents, "B"))
            elif t.token_type == TokenType.VAR:
                parts = t.contents.split("|")
                cmatch = constant_re.match(parts[0])
                if cmatch:
                    out.write(" _(%s) " % cmatch[1])
                for p in parts[1:]:
                    if p.find(":_(") >= 0:
                        out.write(" %s " % p.split(":", 1)[1])
                    else:
                        out.write(blankout(p, "F"))
            elif t.token_type == TokenType.COMMENT:
                if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
                    lineno_comment_map.setdefault(t.lineno, []).append(t.contents)
                    comment_lineno_cache = t.lineno
            else:
                out.write(blankout(t.contents, "X"))

    return out.getvalue()
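
For reference, a minimal sketch of how templatize can be exercised directly, assuming Django is installed and importable; the template strings below are illustrative, and the exact whitespace and blanked-out filler in the output may vary by Django version:

from django.utils.translation.template import templatize

# An inline {% trans %} tag becomes a plain gettext() call that xgettext
# understands (raw_prefix adds the "u" before the string literal).
print(templatize('{% trans "Hello" %}'))
# -> " gettext(u'Hello') "

# A {% blocktrans %} with a {% plural %} section becomes an ngettext() call;
# template variables are rewritten as %(name)s placeholders.
src = (
    "{% blocktrans count counter=num %}{{ counter }} item"
    "{% plural %}{{ counter }} items{% endblocktrans %}"
)
print(templatize(src))
# -> roughly " ngettext(u'%(counter)s item', u'%(counter)s items', count) "
#    followed by blanked-out S/P runs for the original block content.

The blankout() runs of S, P, B, F, and X characters are not noise: they replace non-translatable text character for character, preserving line breaks and string lengths so that the line numbers xgettext records in the .po file still point at the right lines of the original template.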