From 9a441b97c783ace797325e1626e084db16a9acbf Mon Sep 17 00:00:00 2001 From: finswimmer Date: Wed, 26 Nov 2025 06:23:53 +0100 Subject: [PATCH 1/6] fix: add function to check if docstring is at end of file --- src/docformatter/classify.py | 15 ++++++++ src/docformatter/format.py | 5 ++- tests/_data/string_files/do_format_code.toml | 7 ---- .../_data/string_files/format_functions.toml | 6 ++-- tests/test_docformatter.py | 34 ++++++------------- 5 files changed, 33 insertions(+), 34 deletions(-) diff --git a/src/docformatter/classify.py b/src/docformatter/classify.py index 8597e29..7762eaa 100644 --- a/src/docformatter/classify.py +++ b/src/docformatter/classify.py @@ -471,3 +471,18 @@ def is_string_variable( return True return False + + +def is_docstring_at_end_of_file(tokens: list[tokenize.TokenInfo], index: int) -> bool: + """Determine if the docstring is at the end of the file.""" + for i in range(index + 1, len(tokens)): + tok = tokens[i] + if tok.type not in ( + tokenize.NL, + tokenize.NEWLINE, + tokenize.DEDENT, + tokenize.ENDMARKER, + ): + return False + + return True diff --git a/src/docformatter/format.py b/src/docformatter/format.py index a058810..1c3e039 100644 --- a/src/docformatter/format.py +++ b/src/docformatter/format.py @@ -379,7 +379,10 @@ def _get_newlines_by_type( int The number of newlines to insert after the docstring. """ - if _classify.is_module_docstring(tokens, index): + if _classify.is_docstring_at_end_of_file(tokens, index): + # print("End of file") + return 0 + elif _classify.is_module_docstring(tokens, index): # print("Module") return _get_module_docstring_newlines(black) elif _classify.is_class_docstring(tokens, index): diff --git a/tests/_data/string_files/do_format_code.toml b/tests/_data/string_files/do_format_code.toml index e01cf53..156ffe6 100644 --- a/tests/_data/string_files/do_format_code.toml +++ b/tests/_data/string_files/do_format_code.toml @@ -42,7 +42,6 @@ source=''' expected=''' CONST = 123 """Docstring for CONST.""" - ''' [class_docstring] @@ -60,7 +59,6 @@ expected=''' :cvar test_int: a class attribute. .. py:method:: big_method() """ - ''' [newline_class_variable] @@ -87,7 +85,6 @@ expected=''' test_var2 = 1 """This is a second class variable docstring.""" - ''' [class_attribute_wrap] @@ -102,7 +99,6 @@ expected='''class TestClass: test_int = 1 """This is a very, very, very long docstring that should really be reformatted nicely by docformatter.""" - ''' [newline_outside_docstring] @@ -364,7 +360,6 @@ expected='''class Foo: More stuff. """ - ''' [class_empty_lines_2] @@ -688,7 +683,6 @@ class TestClass: :cvar test_int: a class attribute. 
..py:method:: big_method() """ - ''' [issue_139_2] @@ -1134,7 +1128,6 @@ expected=''' #!/usr/bin/env python """a.py.""" - ''' [issue_203] diff --git a/tests/_data/string_files/format_functions.toml b/tests/_data/string_files/format_functions.toml index 1dcf33c..0735b77 100644 --- a/tests/_data/string_files/format_functions.toml +++ b/tests/_data/string_files/format_functions.toml @@ -169,11 +169,11 @@ expected = 1 [get_newlines_by_type_module_docstring] source = '"""Module docstring."""' -expected = 1 +expected = 0 [get_newlines_by_type_module_docstring_black] source = '"""Module docstring."""' -expected = 2 +expected = 0 [get_newlines_by_type_class_docstring] source = ''' @@ -195,7 +195,7 @@ expected = 0 source = '''x = 1 """Docstring for x.""" ''' -expected = 1 +expected = 0 [get_num_rows_columns] token = [5, " ", [3, 10], [3, 40], ''' This is diff --git a/tests/test_docformatter.py b/tests/test_docformatter.py index 63d3df2..3b87440 100644 --- a/tests/test_docformatter.py +++ b/tests/test_docformatter.py @@ -736,11 +736,10 @@ def test_no_pre_summary_space_using_pyproject( See issue #119. """ assert '''\ -@@ -1,2 +1,3 @@ +@@ -1,2 +1,2 @@ class TestFoo(): - """ Docstring that should not have a pre-summary space.""" + """Docstring that should not have a pre-summary space.""" -+ ''' == "\n".join( run_docformatter.communicate()[0].decode().replace("\r", "").split("\n")[2:] ) @@ -787,11 +786,10 @@ def test_pre_summary_space_using_pyproject( See issue #119. """ assert '''\ -@@ -1,2 +1,3 @@ +@@ -1,2 +1,2 @@ class TestFoo(): - """Docstring that should have a pre-summary space.""" + """ Docstring that should have a pre-summary space.""" -+ ''' == "\n".join( run_docformatter.communicate()[0].decode().replace("\r", "").split("\n")[2:] ) @@ -841,7 +839,7 @@ def test_no_pre_summary_newline_using_pyproject( See issue #119. """ assert '''\ -@@ -1,5 +1,7 @@ +@@ -1,5 +1,6 @@ class TestFoo(): """Docstring that should not have a pre-summary newline. @@ -850,7 +848,6 @@ class TestFoo(): + This is a multi-line docstring that should not have a newline placed + before the summary. + """ -+ ''' == "\n".join( run_docformatter.communicate()[0].decode().replace("\r", "").split("\n")[2:] ) @@ -900,7 +897,7 @@ def test_pre_summary_newline_using_pyproject( See issue #119. """ assert '''\ -@@ -1,5 +1,8 @@ +@@ -1,5 +1,7 @@ class TestFoo(): - """Docstring that should have a pre-summary newline. + """ @@ -911,7 +908,6 @@ class TestFoo(): + This is a multi-line docstring that should have a newline placed + before the summary. + """ -+ ''' == "\n".join( run_docformatter.communicate()[0].decode().replace("\r", "").split("\n")[2:] ) @@ -959,13 +955,12 @@ def test_no_pre_summary_multiline_using_pyproject( See issue #119. """ assert '''\ -@@ -1,3 +1,4 @@ +@@ -1,3 +1,3 @@ class TestFoo(): - """Really long summary docstring that should not be - split into a multiline summary.""" + """Really long summary docstring that should not be split into a + multiline summary.""" -+ ''' == "\n".join( run_docformatter.communicate()[0].decode().replace("\r", "").split("\n")[2:] ) @@ -1013,13 +1008,12 @@ def test_pre_summary_multiline_using_pyproject( See issue #119. 
""" assert '''\ -@@ -1,3 +1,4 @@ +@@ -1,3 +1,3 @@ class TestFoo(): - """Really long summary docstring that should be - split into a multiline summary.""" + """Really long summary docstring that should be split into a multiline + summary.""" -+ ''' == "\n".join( run_docformatter.communicate()[0].decode().replace("\r", "").split("\n")[2:] ) @@ -1070,7 +1064,7 @@ def test_no_blank_using_pyproject( See issue #119. """ assert '''\ -@@ -1,6 +1,7 @@ +@@ -1,6 +1,6 @@ class TestFoo(): """Summary docstring that is followed by a description. @@ -1079,7 +1073,6 @@ class TestFoo(): + This is the description and it shouldn\'t have a blank line inserted + after it. """ -+ ''' == "\n".join( run_docformatter.communicate()[0].decode().replace("\r", "").split("\n")[2:] ) @@ -1130,7 +1123,7 @@ def test_blank_using_pyproject( See issue #119. """ assert '''\ -@@ -1,6 +1,8 @@ +@@ -1,6 +1,7 @@ class TestFoo(): """Summary docstring that is followed by a description. @@ -1140,7 +1133,6 @@ class TestFoo(): + after it. + """ -+ ''' == "\n".join( run_docformatter.communicate()[0].decode().replace("\r", "").split("\n")[2:] ) @@ -1188,7 +1180,7 @@ def test_format_wrap_using_pyproject( See issue #119. """ assert '''\ -@@ -1,3 +1,19 @@ +@@ -1,3 +1,18 @@ class foo(): - """Hello world is a long sentence that will be wrapped at 12 - characters because I\'m using that option in pyproject.toml.""" @@ -1209,7 +1201,6 @@ class foo(): + in pypro + ject.tom + l.""" -+ ''' == "\n".join( run_docformatter.communicate()[0].decode().replace("\r", "").split("\n")[2:] ) @@ -1260,11 +1251,10 @@ def test_no_pre_summary_space_using_setup_cfg( See issue #119. """ assert '''\ -@@ -1,2 +1,3 @@ +@@ -1,2 +1,2 @@ class TestFoo(): - """ Docstring that should not have a pre-summary space.""" + """Docstring that should not have a pre-summary space.""" -+ ''' == "\n".join( run_docformatter.communicate()[0].decode().replace("\r", "").split("\n")[2:] ) @@ -1321,7 +1311,6 @@ def test_in_place_using_setup_cfg( == '''\ class TestFoo(): """Docstring that should not have a pre-summary space.""" - ''' ) @@ -1415,11 +1404,10 @@ def test_check_with_diff_using_setup_cfg( See issue #122. 
""" assert '''\ -@@ -1,2 +1,3 @@ +@@ -1,2 +1,2 @@ class TestFoo(): - """ Docstring that should not have a pre-summary space.""" + """Docstring that should not have a pre-summary space.""" -+ ''' == "\n".join( run_docformatter.communicate()[0].decode().replace("\r", "").split("\n")[2:] ) From 264cad59892aee95659939880e2501582edde8b7 Mon Sep 17 00:00:00 2001 From: finswimmer Date: Wed, 26 Nov 2025 06:47:03 +0100 Subject: [PATCH 2/6] fix: handle case where class contains only docstring --- src/docformatter/classify.py | 2 +- src/docformatter/format.py | 11 +++++++ tests/_data/string_files/do_format_code.toml | 31 ++++++++++++++++++++ tests/formatter/test_do_format_code.py | 2 ++ 4 files changed, 45 insertions(+), 1 deletion(-) diff --git a/src/docformatter/classify.py b/src/docformatter/classify.py index 7762eaa..0da2570 100644 --- a/src/docformatter/classify.py +++ b/src/docformatter/classify.py @@ -463,7 +463,7 @@ def is_string_variable( try: _token_types = (tokenize.AWAIT, tokenize.OP) except AttributeError: - _token_types = (tokenize.OP,) # type: ignore + _token_types = (tokenize.OP,) if prev_token.type in _token_types and ( '= """' in token.line or token.line in prev_token.line diff --git a/src/docformatter/format.py b/src/docformatter/format.py index 1c3e039..66ae640 100644 --- a/src/docformatter/format.py +++ b/src/docformatter/format.py @@ -260,11 +260,22 @@ def _get_class_docstring_newlines( The number of newlines to insert after the docstring. """ j = index + 1 + indention_level = tokens[index].start[1] # The docstring is followed by a comment. if tokens[j].string.startswith("#"): return 0 + while j < len(tokens): + if tokens[j].type in (tokenize.NL, tokenize.NEWLINE): + j += 1 + continue + + if tokens[j].start[1] < indention_level: + return 2 + + break + return 1 diff --git a/tests/_data/string_files/do_format_code.toml b/tests/_data/string_files/do_format_code.toml index 156ffe6..425eaab 100644 --- a/tests/_data/string_files/do_format_code.toml +++ b/tests/_data/string_files/do_format_code.toml @@ -1160,3 +1160,34 @@ expected='''def foo(bar): Description. 
""" ''' + +[two_lines_between_stub_classes] +source='''class Foo: + """Foo class.""" +class Bar: + """Bar class.""" +''' +expected='''class Foo: + """Foo class.""" + + +class Bar: + """Bar class.""" +''' + +[two_lines_between_stub_classes_with_preceding_comment] +source='''class Foo: + """Foo class.""" + +# A comment for class Bar +class Bar: + """Bar class.""" +''' +expected='''class Foo: + """Foo class.""" + + +# A comment for class Bar +class Bar: + """Bar class.""" +''' diff --git a/tests/formatter/test_do_format_code.py b/tests/formatter/test_do_format_code.py index 4775f80..4556a50 100644 --- a/tests/formatter/test_do_format_code.py +++ b/tests/formatter/test_do_format_code.py @@ -135,6 +135,8 @@ ("issue_187", NO_ARGS), ("issue_203", NO_ARGS), ("issue_243", NO_ARGS), + ("two_lines_between_stub_classes", NO_ARGS), + ("two_lines_between_stub_classes_with_preceding_comment", NO_ARGS), ], ) def test_do_format_code(test_key, test_args, args): From 62f0712f6f76feee43b80bb24573e054e91ee1e3 Mon Sep 17 00:00:00 2001 From: finswimmer Date: Thu, 27 Nov 2025 06:54:36 +0100 Subject: [PATCH 3/6] chore: fix mypy error --- src/docformatter/classify.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/docformatter/classify.py b/src/docformatter/classify.py index 0da2570..8dbc897 100644 --- a/src/docformatter/classify.py +++ b/src/docformatter/classify.py @@ -460,9 +460,9 @@ def is_string_variable( # TODO: The AWAIT token is removed in Python 3.13 and later. Only Python 3.9 # seems to generate the AWAIT token, so we can safely remove the check for it when # support for Python 3.9 is dropped in April 2026. - try: + if sys.version_info <= (3, 12): _token_types = (tokenize.AWAIT, tokenize.OP) - except AttributeError: + else: _token_types = (tokenize.OP,) if prev_token.type in _token_types and ( From 3076b7c97cb3b10fc26016dfb581cab3dc241517 Mon Sep 17 00:00:00 2001 From: finswimmer Date: Thu, 27 Nov 2025 10:19:40 +0100 Subject: [PATCH 4/6] fix: treat ellipses as code line --- src/docformatter/classify.py | 2 +- tests/_data/string_files/do_format_code.toml | 22 ++++++++++++++++++++ tests/formatter/test_do_format_code.py | 1 + 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/src/docformatter/classify.py b/src/docformatter/classify.py index 8dbc897..0df287b 100644 --- a/src/docformatter/classify.py +++ b/src/docformatter/classify.py @@ -266,7 +266,7 @@ def is_code_line(token: tokenize.TokenInfo) -> bool: bool True if the token is a code line, False otherwise. """ - if token.type == tokenize.NAME and not ( + if (token.type == tokenize.NAME or token.string == "...") and not ( token.line.strip().startswith("def ") or token.line.strip().startswith("async ") or token.line.strip().startswith("class ") diff --git a/tests/_data/string_files/do_format_code.toml b/tests/_data/string_files/do_format_code.toml index 425eaab..4d2051b 100644 --- a/tests/_data/string_files/do_format_code.toml +++ b/tests/_data/string_files/do_format_code.toml @@ -1191,3 +1191,25 @@ expected='''class Foo: class Bar: """Bar class.""" ''' + +[ellipses_is_code_line] +source='''class Foo: + def bar() -> str: + """Bar.""" + + ... + + def baz() -> None: + """Baz.""" + + ... +''' +expected='''class Foo: + def bar() -> str: + """Bar.""" + ... + + def baz() -> None: + """Baz.""" + ... 
+''' diff --git a/tests/formatter/test_do_format_code.py b/tests/formatter/test_do_format_code.py index 4556a50..bfae8cf 100644 --- a/tests/formatter/test_do_format_code.py +++ b/tests/formatter/test_do_format_code.py @@ -137,6 +137,7 @@ ("issue_243", NO_ARGS), ("two_lines_between_stub_classes", NO_ARGS), ("two_lines_between_stub_classes_with_preceding_comment", NO_ARGS), + ("ellipses_is_code_line", NO_ARGS), ], ) def test_do_format_code(test_key, test_args, args): From ca2b44b78c9661b067b60490e0f1e7fc1f826eca Mon Sep 17 00:00:00 2001 From: finswimmer Date: Thu, 27 Nov 2025 11:22:45 +0100 Subject: [PATCH 5/6] fix: include workaround to detect f-string in Python < 3.12 --- src/docformatter/classify.py | 9 +++++++++ tests/_data/string_files/do_format_code.toml | 20 ++++++++++++++++++++ tests/formatter/test_do_format_code.py | 2 ++ 3 files changed, 31 insertions(+) diff --git a/src/docformatter/classify.py b/src/docformatter/classify.py index 0df287b..76eefaf 100644 --- a/src/docformatter/classify.py +++ b/src/docformatter/classify.py @@ -317,6 +317,15 @@ def is_f_string(token: tokenize.TokenInfo, prev_token: tokenize.TokenInfo) -> bo if PY312: if tokenize.FSTRING_MIDDLE in [token.type, prev_token.type]: return True + elif any( + [ + token.string.startswith('f"""'), + prev_token.string.startswith('f"""'), + token.string.startswith("f'''"), + prev_token.string.startswith("f'''"), + ] + ): + return True return False diff --git a/tests/_data/string_files/do_format_code.toml b/tests/_data/string_files/do_format_code.toml index 4d2051b..dc22f59 100644 --- a/tests/_data/string_files/do_format_code.toml +++ b/tests/_data/string_files/do_format_code.toml @@ -1213,3 +1213,23 @@ expected='''class Foo: """Baz.""" ... ''' + +[do_not_break_f_string_double_quotes] +source='''foo = f""" + bar +""" +''' +expected='''foo = f""" + bar +""" +''' + +[do_not_break_f_string_single_quotes] +source="""foo = f''' + bar +''' +""" +expected="""foo = f''' + bar +''' +""" diff --git a/tests/formatter/test_do_format_code.py b/tests/formatter/test_do_format_code.py index bfae8cf..861e439 100644 --- a/tests/formatter/test_do_format_code.py +++ b/tests/formatter/test_do_format_code.py @@ -138,6 +138,8 @@ ("two_lines_between_stub_classes", NO_ARGS), ("two_lines_between_stub_classes_with_preceding_comment", NO_ARGS), ("ellipses_is_code_line", NO_ARGS), + ("do_not_break_f_string_double_quotes", NO_ARGS), + ("do_not_break_f_string_single_quotes", NO_ARGS), ], ) def test_do_format_code(test_key, test_args, args): From 882ecbc4068194f878e6c06c8fcd4a9b7d84159a Mon Sep 17 00:00:00 2001 From: finswimmer Date: Thu, 27 Nov 2025 17:23:09 +0100 Subject: [PATCH 6/6] fix: refine newline detection to exclude empty lines --- src/docformatter/classify.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/docformatter/classify.py b/src/docformatter/classify.py index 76eefaf..62bcc7b 100644 --- a/src/docformatter/classify.py +++ b/src/docformatter/classify.py @@ -441,7 +441,7 @@ def is_newline_continuation( if ( token.type in (tokenize.NEWLINE, tokenize.NL) and token.line.strip() in prev_token.line.strip() - and token.line != "\n" + and token.line not in {"\n", "\r\n"} ): return True
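
A minimal standalone sketch of the trailing-token check that PATCH 1/6 introduces
as is_docstring_at_end_of_file(): a docstring counts as sitting at the end of the
file when only NL, NEWLINE, DEDENT, and ENDMARKER tokens follow it. The sketch
below uses only the stdlib tokenize module; the helper name
trailing_tokens_are_trivial and the demo sources are illustrative assumptions and
do not exist in docformatter.

    # Illustrative sketch only -- trailing_tokens_are_trivial is not a
    # docformatter function; it just mirrors the token-type check from PATCH 1/6.
    import io
    import tokenize


    def trailing_tokens_are_trivial(source: str) -> bool:
        """Return True if only whitespace-like tokens follow the last string."""
        tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
        # Locate the last STRING token, i.e. the candidate docstring.
        last_string = max(
            (i for i, tok in enumerate(tokens) if tok.type == tokenize.STRING),
            default=-1,
        )
        if last_string == -1:
            return False
        # Same "trivial" token set the patch checks after the docstring.
        trivial = (tokenize.NL, tokenize.NEWLINE, tokenize.DEDENT, tokenize.ENDMARKER)
        return all(tok.type in trivial for tok in tokens[last_string + 1 :])


    print(trailing_tokens_are_trivial('x = 1\n"""Docstring for x."""\n'))  # True
    print(trailing_tokens_are_trivial('"""Docstring."""\ny = 2\n'))        # False

When the check returns True, _get_newlines_by_type() short-circuits to zero
trailing newlines, which is what removes the spurious blank line reflected in the
updated expected values in do_format_code.toml and format_functions.toml.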