andreikop / enki

A text editor for programmers
http://enki-editor.org
GNU General Public License v2.0

Exception #291

Closed. bjones1 closed this issue 9 years ago

bjones1 commented 9 years ago

I received the following traceback:

CRITICAL:root:Traceback (most recent call last):
  File "C:\Python27\lib\site-packages\qutepart\__init__.py", line 902, in keyPressEvent
    self._insertNewBlock()
  File "C:\Python27\lib\site-packages\qutepart\__init__.py", line 850, in _insertNewBlock
    self._indenter.autoIndentBlock(cursor.block())
  File "C:\Python27\lib\site-packages\qutepart\indenter\__init__.py", line 92, in autoIndentBlock
    indent = self._smartIndenter.computeIndent(block, char)
  File "C:\Python27\lib\site-packages\qutepart\indenter\base.py", line 39, in computeIndent
    return self.computeSmartIndent(block, char)
  File "C:\Python27\lib\site-packages\qutepart\indenter\python.py", line 98, in computeSmartIndent
    return self._computeSmartIndent(block, column)
  File "C:\Python27\lib\site-packages\qutepart\indenter\python.py", line 44, in _computeSmartIndent
    return self._computeSmartIndent(foundBlock, foundColumn)
  File "C:\Python27\lib\site-packages\qutepart\indenter\python.py", line 44, in _computeSmartIndent
    return self._computeSmartIndent(foundBlock, foundColumn)
  File "C:\Python27\lib\site-packages\qutepart\indenter\python.py", line 44, in _computeSmartIndent
    return self._computeSmartIndent(foundBlock, foundColumn)

... LOTS more of these...

  File "C:\Python27\lib\site-packages\qutepart\indenter\python.py", line 44, in _computeSmartIndent
    return self._computeSmartIndent(foundBlock, foundColumn)
  File "C:\Python27\lib\site-packages\qutepart\indenter\python.py", line 40, in _computeSmartIndent
    lineStripped[-2])
  File "C:\Python27\lib\site-packages\qutepart\indenter\base.py", line 141, in findBracketBackward
    if not self._qpart.isComment(foundBlock.blockNumber(), foundColumn):
  File "C:\Python27\lib\site-packages\qutepart\__init__.py", line 740, in isComment
    self._highlighter.isComment(self.document().findBlockByNumber(line), column)
  File "C:\Python27\lib\site-packages\qutepart\syntaxhlighter.py", line 153, in isComment
    return self._syntax.isComment(data, column)
  File "C:\Python27\lib\site-packages\qutepart\syntax\__init__.py", line 137, in isComment
    return self._getTextType(lineData, column) in 'cbh'
RuntimeError: maximum recursion depth exceeded

when editing the file below. I was at line 353 and pressed Enter (I think).

# .. -*- coding: utf-8 -*-
#
#    Copyright (C) 2012-2015 Bryan A. Jones.
#
#    This file is part of CodeChat.
#
#    CodeChat is free software: you can redistribute it and/or modify it under
#    the terms of the GNU General Public License as published by the Free
#    Software Foundation, either version 3 of the License, or (at your option)
#    any later version.
#
#    CodeChat is distributed in the hope that it will be useful, but WITHOUT ANY
#    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
#    FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
#    details.
#
#    You should have received a copy of the GNU General Public License along
#    with CodeChat.  If not, see <http://www.gnu.org/licenses/>.
#
# *********************************
# CodeToRest_test.py - Unit testing
# *********************************
# This test bench exercises the CodeToRest module. To run, execute ``py.test``
# from the command line. Note the period in this command -- ``pytest`` does
# **NOT** work (it is a completely different program).
#
# .. highlight:: none
#
# Imports
# =======
# These are listed in the order prescribed by `PEP 8 <http://www.python.org/dev/peps/pep-0008/#imports>`_.
#
# Library imports
# ---------------
import re
#
# Third-party imports
# -------------------
# Used to run docutils.
from docutils import core
#
# Local application imports
# -------------------------
from .CodeToRest import code_to_rest_string, code_to_html_file
from .LanguageSpecificOptions import LanguageSpecificOptions

# This actually tests using ``code_to_rest_string``, since that makes
# ``code_to_rest`` easy to call.
class TestCodeToRest(object):
    # Given a string and a language, run it through ``code_to_rest`` and return
    # the resulting string.
    def t(self, in_string, extension = '.c'):
        lso = LanguageSpecificOptions()
        lso.set_language(extension)
        out_string = code_to_rest_string(in_string, lso)
        # For convenience, create the removal string for the chosen language
        unique_remove_comment = lso.comment_string + u' ' + lso.unique_remove_str + u'\n'
        return out_string, unique_remove_comment

    # A single line of code, without an ending ``\n``.
    def test_1(self):
        ret, comment = self.t('testing')
        assert ret ==  '\n\n.. fenced-code::\n\n ' + comment + ' testing\n ' + comment

    # A single line of code, with an ending ``\n``.
    def test_2(self):
        ret, comment = self.t('testing\n')
        assert ret ==  '\n\n.. fenced-code::\n\n ' + comment + ' testing\n ' + comment

    # Several lines of code, with arbitrary indents.
    def test_3(self):
        ret, comment = self.t('testing\n  test 1\n test 2\n   test 3')
        assert ret == '\n\n.. fenced-code::\n\n ' + comment + ' testing\n   test 1\n  test 2\n    test 3\n ' + comment

    # A single line comment, no trailing ``\n``.
    def test_4(self):
        ret, comment = self.t('// testing')
        assert ret == '\ntesting\n'

    # A single line comment, trailing ``\n``.
    def test_5(self):
        ret, comment = self.t('// testing\n')
        assert ret == '\ntesting\n'

    # A multi-line comment.
    def test_5a(self):
        ret, comment = self.t('// testing\n// more testing')
        assert ret == '\ntesting\nmore testing\n'

    # A single line comment with no space after the comment should be treated
    # like code.
    def test_6(self):
        ret, comment = self.t('//testing')
        assert ret == '\n\n.. fenced-code::\n\n ' + comment + ' //testing\n ' + comment

    # A singly indented single-line comment.
    def test_7(self):
        ret, comment = self.t(' // testing')
        assert ret == '\n\n..\n\n testing\n'

    # A doubly indented single-line comment.
    def test_8(self):
        ret, comment = self.t('  // testing')
        assert ret == '\n\n..\n\n ..\n\n  testing\n'

    # A doubly indented multi-line comment.
    def test_9(self):
        ret, comment = self.t('  // testing\n  // more testing')
        assert ret == '\n\n..\n\n ..\n\n  testing\n  more testing\n'

    # Code to comment transition.
    def test_9a(self):
        ret, comment = self.t('testing\n// test')
        assert ret == '\n\n.. fenced-code::\n\n ' + comment + ' testing\n ' + comment + '\ntest\n'

    # A line with just the comment char, but no trailing space.
    def test_10(self):
        ret, comment = self.t('//')
        # Two newlines: one gets added since code_to_rest prepends a ``\n``,
        # assuming a previous line existed; the second comes from the end of
        # ``code_to_rest``, where a final ``\n`` is appended to make sure the
        # file ends with a newline.
        assert ret == '\n\n'

    # A line with just the comment char, with a Microsoft-style line end.
    def test_11(self):
        ret, comment = self.t('//\r\n')
        # Two newlines: see comment in ``test_10``.
        assert ret == '\n\n'

    # Make sure an empty string works.
    def test_12(self):
        ret, comment = self.t('')
        assert ret == '\n'

    # Make sure Unicode works.
    def test_13(self):
        ret, comment = self.t(u'ю')
        assert ret == u'\n\n.. fenced-code::\n\n ' + comment + u' ю\n ' + comment

    # Code to comment transition.
    def test_14(self):
        ret, comment = self.t('testing\n// Comparing')
        assert ret ==  '\n\n.. fenced-code::\n\n ' + comment + ' testing\n ' + comment + '\nComparing\n'

    # Code to comment transition, with leading blank code lines.
    def test_15(self):
        ret, comment = self.t('\ntesting\n// Comparing')
        assert ret ==  '\n\n.. fenced-code::\n\n ' + comment + ' \n testing\n ' + comment + '\nComparing\n'

    # Code to comment transition, with trailing blank code lines.
    def test_16(self):
        ret, comment = self.t('testing\n\n// Comparing')
        assert ret ==  '\n\n.. fenced-code::\n\n ' + comment + ' testing\n \n ' + comment + '\nComparing\n'

    # Comment to code transition.
    def test_17(self):
        ret, comment = self.t('// testing\nComparing')
        assert ret ==  '\ntesting\n\n.. fenced-code::\n\n ' + comment + ' Comparing\n ' + comment

    # Comment to code transition, with leading blank code lines.
    def test_18(self):
        ret, comment = self.t('// testing\n\nComparing')
        assert ret ==  '\ntesting\n\n.. fenced-code::\n\n ' + comment + ' \n Comparing\n ' + comment

    # Comment to code transition, with trailing blank code lines.
    def test_19(self):
        ret, comment = self.t('// testing\nComparing\n\n')
        assert ret ==  '\ntesting\n\n.. fenced-code::\n\n ' + comment + ' Comparing\n \n ' + comment

# Fenced code block testing
# =========================
# Use docutils to test converting a fenced code block to HTML.
class TestRestToHtml(object):
    # Use docutils to convert reST to HTML, then look at the resulting string.
    def t(self, rest):
        html = core.publish_string(rest, writer_name='html')
        # Snip out just the body. Note that ``.`` needs the `re.DOTALL flag
        # <https://docs.python.org/2/library/re.html#re.DOTALL>`_ so that it
        # can match newlines.
        bodyMo = re.search('<body>\n(.*)</body>', html, re.DOTALL)
        body = bodyMo.group(1)
        # docutils wraps the resulting HTML in a <div>. Strip that out as well.
        divMo = re.search('<div class="document">\n\n\n(.*)\n</div>', body, re.DOTALL)
        div = divMo.group(1)
        return div

    # Test the harness -- can we pass a simple string through properly?
    def test_1(self):
        assert self.t('testing') == '<p>testing</p>'

    # Test the harness -- can we pass some code through properly?
    def test_2(self):
        assert (self.t('.. code::\n\n testing') ==
                '<pre class="code literal-block">\ntesting\n</pre>')

    # See if a fenced code block that's too short produces an error.
    def test_3(self):
        assert ('Fenced code block must contain at least two lines.' in
                self.t('.. fenced-code::') )
    def test_4(self):
        assert ('Fenced code block must contain at least two lines.' in
                self.t('.. fenced-code::\n\n First fence') )

    # Verify that a fenced code block with just fences complains about empty
    # output.
    def test_5(self):
        assert ('Content block expected for the '
        in self.t('.. fenced-code::\n\n First fence\n Second fence\n') )

# Check newline preservation **without** syntax highlighting
# ----------------------------------------------------------
    # Check output of a one-line code block surrounded by fences.
    def test_6(self):
        assert (self.t('.. fenced-code::\n\n First fence\n testing\n Second fence\n') ==
                '<pre class="code literal-block">\ntesting\n</pre>')

    # Check that leading newlines are preserved.
    def test_7(self):
        assert (self.t('.. fenced-code::\n\n First fence\n\n testing\n Second fence\n') ==
                '<pre class="code literal-block">\n \ntesting\n</pre>')

    # Check that trailing newlines are preserved.
    def test_8(self):
        assert (self.t('.. fenced-code::\n\n First fence\n testing\n\n Second fence\n') ==
                '<pre class="code literal-block">\ntesting\n \n</pre>')

# Check newline preservation **with** syntax highlighting
# -------------------------------------------------------
    # Check output of a one-line syntax-highlighted code block surrounded by fences.
    def test_9(self):
        assert (self.t('.. fenced-code:: python\n\n First fence\n testing\n Second fence\n') ==
                '<pre class="code python literal-block">\n<span class="name">testing</span>\n</pre>')

    # Check that leading newlines are preserved with syntax highlighting.
    def test_10(self):
        assert (self.t('.. fenced-code:: python\n\n First fence\n\n testing\n Second fence\n') ==
                '<pre class="code python literal-block">\n \n<span class="name">testing</span>\n</pre>')

    # Check that trailing newlines are preserved with syntax highlighting.
    def test_11(self):
        assert (self.t('.. fenced-code:: python\n\n First fence\n testing\n\n Second fence\n') ==
                '<pre class="code python literal-block">\n<span class="name">testing</span>\n \n</pre>')

# Poor coverage of code_to_html_file
# ==================================
class TestCodeToHtmlFile(object):
    def test_1(self):
        code_to_html_file('CodeToRestSphinx.py')

# In development: tests of new Pygments-based parser
# ==================================================
from .CodeToRest import code_file_to_lexer, code_str_to_lexer, \
    group_lexer_tokens, WHITESPACE_GROUP, COMMENT_GROUP, OTHER_GROUP, \
    gather_groups_on_newlines, classify_groups
from pygments.token import Token
import os
class TestCodeToRestNew(object):
    # Check that a simple file or string is tokenized correctly.
    def test_1(self):
        test_py_file = 'usedToTestLexer.py'
        test_py_code = '# A comment\nan_identifier\n'
        test_token_list = [(Token.Comment, u'# A comment'),
                           (Token.Text, u'\n'),
                           (Token.Name, u'an_identifier'),
                           (Token.Text, u'\n')]

        # Use a try/finally to remove the test_py_file even on a test failure.
        try:
            with open(test_py_file, 'w') as f:
                f.write(test_py_code)
            token_list = list(code_file_to_lexer('usedToTestLexer.py'))
            print(token_list)
            assert token_list == test_token_list
        finally:
            os.unlink(test_py_file)

        token_list = list(code_str_to_lexer(test_py_code, 'python'))
        assert token_list == test_token_list

    test_c_code = \
"""#include <stdio.h>

/* A multi-line
   comment */

main(){
  // Empty.
}\n"""

    # Check grouping of a list of tokens.
    def test_2(self):
        token_iter = code_str_to_lexer(self.test_c_code, 'c')
        # Capture both group and string for help in debugging.
        token_group = list(group_lexer_tokens(token_iter))
        # But split the two into separate lists for unit tests.
        group_list, string_list = zip(*token_group)
        assert group_list == (OTHER_GROUP,        # The #include.
                              WHITESPACE_GROUP,   # Up to the /* comment */.
                              COMMENT_GROUP,      # The /* comment */.
                              WHITESPACE_GROUP,   # Up to the code.
                              OTHER_GROUP,        # main(){.
                              WHITESPACE_GROUP,   # Up to the // comment.
                              COMMENT_GROUP,      # // comment.
                              OTHER_GROUP,        # Closing }.
                              WHITESPACE_GROUP, ) # Final \n.

    # Check grouping of an empty string.
    def test_3(self):
        # Note that this will add a newline to the lexed output, since the
        # `ensurenl <http://pygments.org/docs/lexers/>`_ option is True by
        # default.
        token_iter = code_str_to_lexer('', 'c')
        # Capture both group and string for help in debugging.
        token_group = list(group_lexer_tokens(token_iter))
        assert token_group == [(WHITESPACE_GROUP, u'\n')]

    # Check gathering of groups by newlines.
    def test_4(self):
        token_iter = code_str_to_lexer(self.test_c_code, 'c')
        token_group = group_lexer_tokens(token_iter)
        gathered_group = list(gather_groups_on_newlines(token_group))
        print(gathered_group)
        expected_group = [
          [(OTHER_GROUP, u'#include <stdio.h>\n')],
          [(WHITESPACE_GROUP, u'\n')],
          [(COMMENT_GROUP, u'/* A multi-line\n')],
          [(COMMENT_GROUP, u'   comment */'), (WHITESPACE_GROUP, u'\n')],
          [(WHITESPACE_GROUP, u'\n')],
          [(OTHER_GROUP, u'main(){'), (WHITESPACE_GROUP, u'\n')],
          [(WHITESPACE_GROUP, u'  '), (COMMENT_GROUP, u'// Empty.\n')],
          [(OTHER_GROUP, u'}'), (WHITESPACE_GROUP, u'\n')] ]
        assert gathered_group == expected_group

# Classifier tests
# ----------------
    # Test a comment.
    def test_5(self):
        cg = list( classify_groups([[
          (COMMENT_GROUP, u'// comment\n')]]) )
        assert cg == [(0, u'// comment\n')]

    # Test whitespace comment.
    def test_6(self):
        cg = list( classify_groups([[
          (WHITESPACE_GROUP, u'  '), (COMMENT_GROUP, u'// comment\n')]]) )
        assert cg == [(2, u'// comment\n')]

    # Test whitespace comment whitespace.
    def test_7(self):
        cg = list( classify_groups([[
          (WHITESPACE_GROUP, u'  '), (COMMENT_GROUP, u'/* comment */'),
          (WHITESPACE_GROUP, u'\n')]]) )
        assert cg == [(2, u'// comment\n')]
andreikop commented 9 years ago

Fixed in Qutepart
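
For anyone hitting the same traceback: the failure is unbounded recursion in qutepart's Python smart indenter, where _computeSmartIndent keeps chasing earlier lines until Python's recursion limit is reached. The sketch below is not the actual Qutepart fix (see the qutepart repository for that); it only illustrates, with hypothetical names such as compute_smart_indent and MAX_INDENT_RECURSION, how a recursive indent lookup can be given a depth budget and a fallback so that pathological input degrades gracefully instead of raising RuntimeError.

# A minimal, self-contained sketch -- NOT the actual Qutepart code. All names
# here (compute_smart_indent, MAX_INDENT_RECURSION) are hypothetical.
MAX_INDENT_RECURSION = 256  # arbitrary safety limit for this illustration

def compute_smart_indent(lines, line_index, _depth=0):
    """Return the indent (in spaces) to use for lines[line_index].

    Falls back to copying the previous non-empty line's indent once the
    recursion budget is exhausted, rather than recursing without bound.
    """
    if _depth >= MAX_INDENT_RECURSION or line_index <= 0:
        # Fallback: reuse the previous non-empty line's indentation, if any.
        for prev in reversed(lines[:max(line_index, 0)]):
            if prev.strip():
                return len(prev) - len(prev.lstrip())
        return 0

    prev_line = lines[line_index - 1].rstrip()
    if not prev_line:
        # Blank line: look further back, counting this step against the budget.
        return compute_smart_indent(lines, line_index - 1, _depth + 1)
    if prev_line.endswith(':'):
        # A block opener: indent one level deeper than the previous line.
        return len(prev_line) - len(prev_line.lstrip()) + 4
    return len(prev_line) - len(prev_line.lstrip())

if __name__ == '__main__':
    sample = ['def f():', '', '', 'pass']
    print(compute_smart_indent(sample, 3))  # -> 4

The only point of the sketch is that the recursion carries an explicit budget; the real indenter's heuristics are, of course, considerably more involved.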