Development of an internal social media platform with personalised dashboards for students
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

others.py 4.2KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117
  1. # -*- coding: utf-8 -*-
  2. #
  3. # Copyright (C) 2016 Andi Albrecht, albrecht.andi@gmail.com
  4. #
  5. # This module is part of python-sqlparse and is released under
  6. # the BSD License: https://opensource.org/licenses/BSD-3-Clause
  7. from sqlparse import sql, tokens as T
  8. from sqlparse.utils import split_unquoted_newlines
  9. class StripCommentsFilter(object):
  10. @staticmethod
  11. def _process(tlist):
  12. def get_next_comment():
  13. # TODO(andi) Comment types should be unified, see related issue38
  14. return tlist.token_next_by(i=sql.Comment, t=T.Comment)
  15. tidx, token = get_next_comment()
  16. while token:
  17. pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
  18. nidx, next_ = tlist.token_next(tidx, skip_ws=False)
  19. # Replace by whitespace if prev and next exist and if they're not
  20. # whitespaces. This doesn't apply if prev or next is a paranthesis.
  21. if (prev_ is None or next_ is None or
  22. prev_.is_whitespace or prev_.match(T.Punctuation, '(') or
  23. next_.is_whitespace or next_.match(T.Punctuation, ')')):
  24. tlist.tokens.remove(token)
  25. else:
  26. tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')
  27. tidx, token = get_next_comment()
  28. def process(self, stmt):
  29. [self.process(sgroup) for sgroup in stmt.get_sublists()]
  30. StripCommentsFilter._process(stmt)
  31. return stmt
  32. class StripWhitespaceFilter(object):
  33. def _stripws(self, tlist):
  34. func_name = '_stripws_{cls}'.format(cls=type(tlist).__name__)
  35. func = getattr(self, func_name.lower(), self._stripws_default)
  36. func(tlist)
  37. @staticmethod
  38. def _stripws_default(tlist):
  39. last_was_ws = False
  40. is_first_char = True
  41. for token in tlist.tokens:
  42. if token.is_whitespace:
  43. token.value = '' if last_was_ws or is_first_char else ' '
  44. last_was_ws = token.is_whitespace
  45. is_first_char = False
  46. def _stripws_identifierlist(self, tlist):
  47. # Removes newlines before commas, see issue140
  48. last_nl = None
  49. for token in list(tlist.tokens):
  50. if last_nl and token.ttype is T.Punctuation and token.value == ',':
  51. tlist.tokens.remove(last_nl)
  52. last_nl = token if token.is_whitespace else None
  53. # next_ = tlist.token_next(token, skip_ws=False)
  54. # if (next_ and not next_.is_whitespace and
  55. # token.ttype is T.Punctuation and token.value == ','):
  56. # tlist.insert_after(token, sql.Token(T.Whitespace, ' '))
  57. return self._stripws_default(tlist)
  58. def _stripws_parenthesis(self, tlist):
  59. if tlist.tokens[1].is_whitespace:
  60. tlist.tokens.pop(1)
  61. if tlist.tokens[-2].is_whitespace:
  62. tlist.tokens.pop(-2)
  63. self._stripws_default(tlist)
  64. def process(self, stmt, depth=0):
  65. [self.process(sgroup, depth + 1) for sgroup in stmt.get_sublists()]
  66. self._stripws(stmt)
  67. if depth == 0 and stmt.tokens and stmt.tokens[-1].is_whitespace:
  68. stmt.tokens.pop(-1)
  69. return stmt
  70. class SpacesAroundOperatorsFilter(object):
  71. @staticmethod
  72. def _process(tlist):
  73. ttypes = (T.Operator, T.Comparison)
  74. tidx, token = tlist.token_next_by(t=ttypes)
  75. while token:
  76. nidx, next_ = tlist.token_next(tidx, skip_ws=False)
  77. if next_ and next_.ttype != T.Whitespace:
  78. tlist.insert_after(tidx, sql.Token(T.Whitespace, ' '))
  79. pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
  80. if prev_ and prev_.ttype != T.Whitespace:
  81. tlist.insert_before(tidx, sql.Token(T.Whitespace, ' '))
  82. tidx += 1 # has to shift since token inserted before it
  83. # assert tlist.token_index(token) == tidx
  84. tidx, token = tlist.token_next_by(t=ttypes, idx=tidx)
  85. def process(self, stmt):
  86. [self.process(sgroup) for sgroup in stmt.get_sublists()]
  87. SpacesAroundOperatorsFilter._process(stmt)
  88. return stmt
# ---------------------------
# postprocess
  91. class SerializerUnicode(object):
  92. @staticmethod
  93. def process(stmt):
  94. lines = split_unquoted_newlines(stmt)
  95. return '\n'.join(line.rstrip() for line in lines)