You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

others.py 4.6KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125
  1. # -*- coding: utf-8 -*-
  2. #
  3. # Copyright (C) 2009-2018 the sqlparse authors and contributors
  4. # <see AUTHORS file>
  5. #
  6. # This module is part of python-sqlparse and is released under
  7. # the BSD License: https://opensource.org/licenses/BSD-3-Clause
  8. from sqlparse import sql, tokens as T
  9. from sqlparse.utils import split_unquoted_newlines
  10. class StripCommentsFilter(object):
  11. @staticmethod
  12. def _process(tlist):
  13. def get_next_comment():
  14. # TODO(andi) Comment types should be unified, see related issue38
  15. return tlist.token_next_by(i=sql.Comment, t=T.Comment)
  16. tidx, token = get_next_comment()
  17. while token:
  18. pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
  19. nidx, next_ = tlist.token_next(tidx, skip_ws=False)
  20. # Replace by whitespace if prev and next exist and if they're not
  21. # whitespaces. This doesn't apply if prev or next is a parenthesis.
  22. if (prev_ is None or next_ is None
  23. or prev_.is_whitespace or prev_.match(T.Punctuation, '(')
  24. or next_.is_whitespace or next_.match(T.Punctuation, ')')):
  25. # Insert a whitespace to ensure the following SQL produces
  26. # a valid SQL (see #425). For example:
  27. #
  28. # Before: select a--comment\nfrom foo
  29. # After: select a from foo
  30. if prev_ is not None and next_ is None:
  31. tlist.tokens.insert(tidx, sql.Token(T.Whitespace, ' '))
  32. tlist.tokens.remove(token)
  33. else:
  34. tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')
  35. tidx, token = get_next_comment()
  36. def process(self, stmt):
  37. [self.process(sgroup) for sgroup in stmt.get_sublists()]
  38. StripCommentsFilter._process(stmt)
  39. return stmt
  40. class StripWhitespaceFilter(object):
  41. def _stripws(self, tlist):
  42. func_name = '_stripws_{cls}'.format(cls=type(tlist).__name__)
  43. func = getattr(self, func_name.lower(), self._stripws_default)
  44. func(tlist)
  45. @staticmethod
  46. def _stripws_default(tlist):
  47. last_was_ws = False
  48. is_first_char = True
  49. for token in tlist.tokens:
  50. if token.is_whitespace:
  51. token.value = '' if last_was_ws or is_first_char else ' '
  52. last_was_ws = token.is_whitespace
  53. is_first_char = False
  54. def _stripws_identifierlist(self, tlist):
  55. # Removes newlines before commas, see issue140
  56. last_nl = None
  57. for token in list(tlist.tokens):
  58. if last_nl and token.ttype is T.Punctuation and token.value == ',':
  59. tlist.tokens.remove(last_nl)
  60. last_nl = token if token.is_whitespace else None
  61. # next_ = tlist.token_next(token, skip_ws=False)
  62. # if (next_ and not next_.is_whitespace and
  63. # token.ttype is T.Punctuation and token.value == ','):
  64. # tlist.insert_after(token, sql.Token(T.Whitespace, ' '))
  65. return self._stripws_default(tlist)
  66. def _stripws_parenthesis(self, tlist):
  67. while tlist.tokens[1].is_whitespace:
  68. tlist.tokens.pop(1)
  69. while tlist.tokens[-2].is_whitespace:
  70. tlist.tokens.pop(-2)
  71. self._stripws_default(tlist)
  72. def process(self, stmt, depth=0):
  73. [self.process(sgroup, depth + 1) for sgroup in stmt.get_sublists()]
  74. self._stripws(stmt)
  75. if depth == 0 and stmt.tokens and stmt.tokens[-1].is_whitespace:
  76. stmt.tokens.pop(-1)
  77. return stmt
  78. class SpacesAroundOperatorsFilter(object):
  79. @staticmethod
  80. def _process(tlist):
  81. ttypes = (T.Operator, T.Comparison)
  82. tidx, token = tlist.token_next_by(t=ttypes)
  83. while token:
  84. nidx, next_ = tlist.token_next(tidx, skip_ws=False)
  85. if next_ and next_.ttype != T.Whitespace:
  86. tlist.insert_after(tidx, sql.Token(T.Whitespace, ' '))
  87. pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
  88. if prev_ and prev_.ttype != T.Whitespace:
  89. tlist.insert_before(tidx, sql.Token(T.Whitespace, ' '))
  90. tidx += 1 # has to shift since token inserted before it
  91. # assert tlist.token_index(token) == tidx
  92. tidx, token = tlist.token_next_by(t=ttypes, idx=tidx)
  93. def process(self, stmt):
  94. [self.process(sgroup) for sgroup in stmt.get_sublists()]
  95. SpacesAroundOperatorsFilter._process(stmt)
  96. return stmt
  97. # ---------------------------
  98. # postprocess
  99. class SerializerUnicode(object):
  100. @staticmethod
  101. def process(stmt):
  102. lines = split_unquoted_newlines(stmt)
  103. return '\n'.join(line.rstrip() for line in lines)