From a4e1e7b79d48401bcad03f99c706229ce42d9e40 Mon Sep 17 00:00:00 2001
From: Zbigniew Jędrzejewski-Szmek
Date: Jun 18 2019 07:24:03 +0000
Subject: Remove old patches

---

diff --git a/0001-Fixes-to-make-asttokens-work-with-Python3.7-and-astr.patch b/0001-Fixes-to-make-asttokens-work-with-Python3.7-and-astr.patch
deleted file mode 100644
index b11ea43..0000000
--- a/0001-Fixes-to-make-asttokens-work-with-Python3.7-and-astr.patch
+++ /dev/null
@@ -1,126 +0,0 @@
-From 69e8e2ab42353479129f14afc542322d3b02de84 Mon Sep 17 00:00:00 2001
-From: Dmitry S
-Date: Thu, 5 Jul 2018 15:03:08 -0400
-Subject: [PATCH] Fixes to make asttokens work with Python3.7 and astroid 2.0.
-
-Fixes detection of non-coding tokens to work with py37.
-Skips testing of deep recursion on astroid 2.0, which no longer supports it.
-Adds Python3.7 testing to tox.ini.
----
- asttokens/asttokens.py    |  8 ++++----
- asttokens/util.py         | 14 ++++++++++++++
- tests/test_mark_tokens.py | 20 ++++++++++++++------
- tox.ini                   |  5 +++--
- 4 files changed, 35 insertions(+), 12 deletions(-)
-
-diff --git a/asttokens/asttokens.py b/asttokens/asttokens.py
-index c0126a6b78..668620d085 100644
---- a/asttokens/asttokens.py
-+++ b/asttokens/asttokens.py
-@@ -20,7 +20,7 @@ import io
- import six
- from six.moves import xrange  # pylint: disable=redefined-builtin
- from .line_numbers import LineNumbers
--from .util import Token, match_token
-+from .util import Token, match_token, is_non_coding_token
- from .mark_tokens import MarkTokens
- 
- class ASTTokens(object):
-@@ -135,7 +135,7 @@ class ASTTokens(object):
-     """
-     i = tok.index + 1
-     if not include_extra:
--      while self._tokens[i].type >= token.N_TOKENS:
-+      while is_non_coding_token(self._tokens[i].type):
-         i += 1
-     return self._tokens[i]
- 
-@@ -146,7 +146,7 @@ class ASTTokens(object):
-     """
-     i = tok.index - 1
-     if not include_extra:
--      while self._tokens[i].type >= token.N_TOKENS:
-+      while is_non_coding_token(self._tokens[i].type):
-         i -= 1
-     return self._tokens[i]
- 
-@@ -168,7 +168,7 @@ class ASTTokens(object):
-     include_extra is True, includes non-coding tokens such as tokenize.NL and .COMMENT.
-     """
-     for i in xrange(first_token.index, last_token.index + 1):
--      if include_extra or self._tokens[i].type < token.N_TOKENS:
-+      if include_extra or not is_non_coding_token(self._tokens[i].type):
-         yield self._tokens[i]
- 
-   def get_tokens(self, node, include_extra=False):
-diff --git a/asttokens/util.py b/asttokens/util.py
-index 4dd2f27983..3be33abb57 100644
---- a/asttokens/util.py
-+++ b/asttokens/util.py
-@@ -57,6 +57,20 @@ def expect_token(token, tok_type, tok_str=None):
-                       token_repr(tok_type, tok_str), str(token),
-                       token.start[0], token.start[1] + 1))
- 
-+# These were previously defined in tokenize.py and distinguishable by being greater than
-+# token.N_TOKENS. As of python3.7, they are in token.py, and we check for them explicitly.
-+if hasattr(token, 'COMMENT'):
-+  def is_non_coding_token(token_type):
-+    """
-+    These are considered non-coding tokens, as they don't affect the syntax tree.
-+    """
-+    return token_type in (token.NL, token.COMMENT, token.ENCODING)
-+else:
-+  def is_non_coding_token(token_type):
-+    """
-+    These are considered non-coding tokens, as they don't affect the syntax tree.
-+    """
-+    return token_type >= token.N_TOKENS
- 
- def iter_children(node):
-   """
-diff --git a/tests/test_mark_tokens.py b/tests/test_mark_tokens.py
-index a15934df9c..61c56bab1b 100644
---- a/tests/test_mark_tokens.py
-+++ b/tests/test_mark_tokens.py
-@@ -3,6 +3,7 @@ from __future__ import unicode_literals, print_function
- import astroid
- import six
- import sys
-+import token
- import textwrap
- import unittest
- from . import tools
-@@ -155,13 +156,19 @@ b + # line3
-     # to_source() on it because it chokes on recursion depth. So we test individual nodes.
-     source = tools.read_fixture('astroid/joined_strings.py')
- 
--    astroid.MANAGER.optimize_ast = True
--    try:
--      m = self.create_mark_checker(source)
--    finally:
--      astroid.MANAGER.optimize_ast = False
--
-     if self.is_astroid_test:
-+      if getattr(astroid, '__version__', '1') >= '2':
-+        # Astroid 2 no longer supports this; see
-+        # https://github.com/PyCQA/astroid/issues/557#issuecomment-396004274
-+        self.skipTest('astroid-2.0 does not support this')
-+
-+      # Astroid < 2 does support this with optimize_ast set to True
-+      astroid.MANAGER.optimize_ast = True
-+      try:
-+        m = self.create_mark_checker(source)
-+      finally:
-+        astroid.MANAGER.optimize_ast = False
-+
-       self.assertEqual(len(m.all_nodes), 4)  # This is the result of astroid's optimization
-       self.assertEqual(m.view_node_types_at(1, 0), {'Module', 'Assign', 'AssignName'})
-       const = next(n for n in m.all_nodes if isinstance(n, astroid.nodes.Const))
-@@ -171,6 +178,7 @@ b + # line3
-       # astroid could avoid the need for the optimization by using an explicit stack like we do.
-       #self.assertEqual(m.atok.get_text_range(const), (5, len(source) - 1))
-     else:
-+      m = self.create_mark_checker(source)
-       self.assertEqual(len(m.all_nodes), 2104)
-       self.assertEqual(m.view_node(m.all_nodes[-1]),
-                        "Str:'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7'")
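As context for the first deleted patch: the is_non_coding_token shim it carried is
self-contained, and a minimal standalone sketch shows it in action. The sample
source string below is made up for illustration; the two function definitions
come straight from the patch above.

import io
import token
import tokenize

if hasattr(token, 'COMMENT'):
    # Python >= 3.7: the non-coding token types live in the token module,
    # so they can be checked for explicitly.
    def is_non_coding_token(token_type):
        return token_type in (token.NL, token.COMMENT, token.ENCODING)
else:
    # Older Pythons: these pseudo-token types exist only in tokenize, and
    # all of them compare >= token.N_TOKENS.
    def is_non_coding_token(token_type):
        return token_type >= token.N_TOKENS

source = "x = 1  # a comment\n"
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    print(tokenize.tok_name[tok.type], is_non_coding_token(tok.type))

Either branch flags the COMMENT and NL tokens as non-coding while leaving
NAME, OP, NUMBER and NEWLINE untouched, which is what the asttokens.py hunks
above rely on.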
-+ """ -+ return token_type >= token.N_TOKENS - - def iter_children(node): - """ -diff --git a/tests/test_mark_tokens.py b/tests/test_mark_tokens.py -index a15934df9c..61c56bab1b 100644 ---- a/tests/test_mark_tokens.py -+++ b/tests/test_mark_tokens.py -@@ -3,6 +3,7 @@ from __future__ import unicode_literals, print_function - import astroid - import six - import sys -+import token - import textwrap - import unittest - from . import tools -@@ -155,13 +156,19 @@ b + # line3 - # to_source() on it because it chokes on recursion depth. So we test individual nodes. - source = tools.read_fixture('astroid/joined_strings.py') - -- astroid.MANAGER.optimize_ast = True -- try: -- m = self.create_mark_checker(source) -- finally: -- astroid.MANAGER.optimize_ast = False -- - if self.is_astroid_test: -+ if getattr(astroid, '__version__', '1') >= '2': -+ # Astroid 2 no longer supports this; see -+ # https://github.com/PyCQA/astroid/issues/557#issuecomment-396004274 -+ self.skipTest('astroid-2.0 does not support this') -+ -+ # Astroid < 2 does support this with optimize_ast set to True -+ astroid.MANAGER.optimize_ast = True -+ try: -+ m = self.create_mark_checker(source) -+ finally: -+ astroid.MANAGER.optimize_ast = False -+ - self.assertEqual(len(m.all_nodes), 4) # This is the result of astroid's optimization - self.assertEqual(m.view_node_types_at(1, 0), {'Module', 'Assign', 'AssignName'}) - const = next(n for n in m.all_nodes if isinstance(n, astroid.nodes.Const)) -@@ -171,6 +178,7 @@ b + # line3 - # astroid could avoid the need for the optimization by using an explicit stack like we do. - #self.assertEqual(m.atok.get_text_range(const), (5, len(source) - 1)) - else: -+ m = self.create_mark_checker(source) - self.assertEqual(len(m.all_nodes), 2104) - self.assertEqual(m.view_node(m.all_nodes[-1]), - "Str:'F1akOFFiRIgPHTZksKBAgMCLGTdGNIAAQgKfDAcgZbj0odOnUA8GBAA7'") diff --git a/0003-line_numbers-fix-off-by-one-in-offset-indexing.patch b/0003-line_numbers-fix-off-by-one-in-offset-indexing.patch deleted file mode 100644 index 12bbc6a..0000000 --- a/0003-line_numbers-fix-off-by-one-in-offset-indexing.patch +++ /dev/null @@ -1,24 +0,0 @@ -From dc520be195e8cd866da90e0edbf560c38ee7094e Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= -Date: Thu, 5 Jul 2018 12:50:00 +0200 -Subject: [PATCH] line_numbers: fix off-by-one in offset indexing - -len(offset) is one past the end of array. I assume this code was supposed -to always force a valid index, so len(offset)-1 should be used. 
diff --git a/0004-test-split-calls-to-to_source-into-seperate-lines.patch b/0004-test-split-calls-to-to_source-into-seperate-lines.patch
deleted file mode 100644
index 35379dc..0000000
--- a/0004-test-split-calls-to-to_source-into-seperate-lines.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-From 4ad635e3e82b0eafac20af2de7125650796c346f Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?=
-Date: Thu, 5 Jul 2018 12:41:40 +0200
-Subject: [PATCH] test: split calls to to_source() into separate lines
-
-This makes it easier to see in the traceback whether the original node
-or the rebuilt_node caused a failure.
----
- tests/tools.py | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
-diff --git a/tests/tools.py b/tests/tools.py
-index b8db8fa9ae..7814c66175 100644
---- a/tests/tools.py
-+++ b/tests/tools.py
-@@ -125,7 +125,9 @@ class MarkChecker(object):
-       rebuilt_node = parse_snippet(text, is_expr=util.is_expr(node), is_module=util.is_module(node))
- 
-       # Now we need to check if the two nodes are equivalent.
--      test_case.assertEqual(to_source(rebuilt_node), to_source(node))
-+      left = to_source(rebuilt_node)
-+      right = to_source(node)
-+      test_case.assertEqual(left, right)
-       tested_nodes += 1
- 
-     return tested_nodes
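The rationale of that last patch can be demonstrated with a toy stand-in for
to_source(); the helper and inputs below are hypothetical, not the real
tests/tools.py code. With both calls on one line, a traceback pointing at
that line does not say which conversion raised; on separate lines it does.

import traceback

def to_source(node):
    # Hypothetical stand-in that fails for one input.
    if node is None:
        raise ValueError("cannot rebuild source")
    return str(node)

rebuilt_node, node = None, "x = 1"

try:
    left = to_source(rebuilt_node)   # a failure here clearly blames rebuilt_node
    right = to_source(node)          # a failure here clearly blames node
    assert left == right
except ValueError:
    traceback.print_exc()            # the printed line number names the first call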