
Commit 1321803

mypy: test_parser.py test_phystokens.py test_process.py test_report.py test_results.py test_setup.py
1 parent 2c52782 commit 1321803

8 files changed: +221 −184 lines changed
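The diffs that follow apply one mechanical pattern across the test modules: test methods gain `-> None` return annotations, helper methods gain parameter and return types (`str` in, `PythonParser` out), and the necessary typing imports are added. A minimal sketch of the pattern, using an illustrative test class rather than the commit's actual code:

    from __future__ import annotations

    from coverage.parser import PythonParser


    class ExampleParserTest:
        """Illustrates the annotation style applied throughout this commit."""

        def parse_source(self, text: str) -> PythonParser:
            # Helpers are annotated with their concrete return type...
            parser = PythonParser(text=text, exclude="nocover")
            parser.parse_source()
            return parser

        def test_exit_counts_shape(self) -> None:
            # ...while test methods are annotated as returning None.
            parser = self.parse_source("a = 1\n")
            assert parser.exit_counts() == {1: 1}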

tests/test_parser.py

Lines changed: 30 additions & 26 deletions
@@ -3,11 +3,15 @@
 
 """Tests for coverage.py's code parsing."""
 
+from __future__ import annotations
+
 import ast
 import os.path
 import textwrap
 import warnings
 
+from typing import List
+
 import pytest
 
 from coverage import env
@@ -23,14 +27,14 @@ class PythonParserTest(CoverageTest):
 
     run_in_temp_dir = False
 
-    def parse_source(self, text):
+    def parse_source(self, text: str) -> PythonParser:
         """Parse `text` as source, and return the `PythonParser` used."""
         text = textwrap.dedent(text)
         parser = PythonParser(text=text, exclude="nocover")
         parser.parse_source()
         return parser
 
-    def test_exit_counts(self):
+    def test_exit_counts(self) -> None:
         parser = self.parse_source("""\
             # check some basic branch counting
             class Foo:
@@ -47,7 +51,7 @@ class Bar:
             2:1, 3:1, 4:2, 5:1, 7:1, 9:1, 10:1
         }
 
-    def test_generator_exit_counts(self):
+    def test_generator_exit_counts(self) -> None:
         # https://github.com/nedbat/coveragepy/issues/324
         parser = self.parse_source("""\
             def gen(input):
@@ -63,7 +67,7 @@ def gen(input):
             5:1,  # list -> exit
         }
 
-    def test_try_except(self):
+    def test_try_except(self) -> None:
         parser = self.parse_source("""\
             try:
                 a = 2
@@ -79,7 +83,7 @@ def test_try_except(self):
             1: 1, 2:1, 3:2, 4:1, 5:2, 6:1, 7:1, 8:1, 9:1
         }
 
-    def test_excluded_classes(self):
+    def test_excluded_classes(self) -> None:
         parser = self.parse_source("""\
             class Foo:
                 def __init__(self):
@@ -93,7 +97,7 @@ class Bar:
             1:0, 2:1, 3:1
         }
 
-    def test_missing_branch_to_excluded_code(self):
+    def test_missing_branch_to_excluded_code(self) -> None:
         parser = self.parse_source("""\
             if fooey:
                 a = 2
@@ -121,7 +125,7 @@ def foo():
             """)
         assert parser.exit_counts() == { 1:1, 2:1, 3:1, 6:1 }
 
-    def test_indentation_error(self):
+    def test_indentation_error(self) -> None:
         msg = (
             "Couldn't parse '<code>' as Python source: " +
             "'unindent does not match any outer indentation level' at line 3"
@@ -133,15 +137,15 @@ def test_indentation_error(self):
                 1
                """)
 
-    def test_token_error(self):
+    def test_token_error(self) -> None:
         msg = "Couldn't parse '<code>' as Python source: 'EOF in multi-line string' at line 1"
         with pytest.raises(NotPython, match=msg):
             _ = self.parse_source("""\
                 '''
                 """)
 
     @xfail_pypy38
-    def test_decorator_pragmas(self):
+    def test_decorator_pragmas(self) -> None:
         parser = self.parse_source("""\
             # 1
@@ -177,7 +181,7 @@ def func(x=25):
         assert parser.statements == {8}
 
     @xfail_pypy38
-    def test_decorator_pragmas_with_colons(self):
+    def test_decorator_pragmas_with_colons(self) -> None:
         # A colon in a decorator expression would confuse the parser,
         # ending the exclusion of the decorated function.
         parser = self.parse_source("""\
@@ -197,7 +201,7 @@ def g():
         assert parser.raw_statements == raw_statements
         assert parser.statements == set()
 
-    def test_class_decorator_pragmas(self):
+    def test_class_decorator_pragmas(self) -> None:
         parser = self.parse_source("""\
             class Foo(object):
                 def __init__(self):
@@ -211,7 +215,7 @@ def __init__(self):
         assert parser.raw_statements == {1, 2, 3, 5, 6, 7, 8}
         assert parser.statements == {1, 2, 3}
 
-    def test_empty_decorated_function(self):
+    def test_empty_decorated_function(self) -> None:
         parser = self.parse_source("""\
             def decorator(func):
                 return func
@@ -247,7 +251,7 @@ def bar(self):
         assert expected_arcs == parser.arcs()
         assert expected_exits == parser.exit_counts()
 
-    def test_fuzzed_double_parse(self):
+    def test_fuzzed_double_parse(self) -> None:
         # https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=50381
         # The second parse used to raise `TypeError: 'NoneType' object is not iterable`
         msg = "EOF in multi-line statement"
@@ -262,13 +266,13 @@ class ParserMissingArcDescriptionTest(CoverageTest):
 
     run_in_temp_dir = False
 
-    def parse_text(self, source):
+    def parse_text(self, source: str) -> PythonParser:
         """Parse Python source, and return the parser object."""
         parser = PythonParser(text=textwrap.dedent(source))
         parser.parse_source()
         return parser
 
-    def test_missing_arc_description(self):
+    def test_missing_arc_description(self) -> None:
         # This code is never run, so the actual values don't matter.
         parser = self.parse_text("""\
             if x:
@@ -304,7 +308,7 @@ def func10():
         )
         assert expected == parser.missing_arc_description(11, 13)
 
-    def test_missing_arc_descriptions_for_small_callables(self):
+    def test_missing_arc_descriptions_for_small_callables(self) -> None:
         parser = self.parse_text("""\
             callables = [
                 lambda: 2,
@@ -323,7 +327,7 @@ def test_missing_arc_descriptions_for_small_callables(self):
         expected = "line 5 didn't finish the set comprehension on line 5"
         assert expected == parser.missing_arc_description(5, -5)
 
-    def test_missing_arc_descriptions_for_exceptions(self):
+    def test_missing_arc_descriptions_for_exceptions(self) -> None:
         parser = self.parse_text("""\
             try:
                 pass
@@ -343,7 +347,7 @@ def test_missing_arc_descriptions_for_exceptions(self):
         )
         assert expected == parser.missing_arc_description(5, 6)
 
-    def test_missing_arc_descriptions_for_finally(self):
+    def test_missing_arc_descriptions_for_finally(self) -> None:
         parser = self.parse_text("""\
             def function():
                 for i in range(2):
@@ -417,7 +421,7 @@ def function():
         )
         assert expected == parser.missing_arc_description(18, -1)
 
-    def test_missing_arc_descriptions_bug460(self):
+    def test_missing_arc_descriptions_bug460(self) -> None:
         parser = self.parse_text("""\
             x = 1
             d = {
@@ -429,7 +433,7 @@ def test_missing_arc_descriptions_bug460(self):
         assert parser.missing_arc_description(2, -3) == "line 3 didn't finish the lambda on line 3"
 
     @pytest.mark.skipif(not env.PYBEHAVIOR.match_case, reason="Match-case is new in 3.10")
-    def test_match_case_with_default(self):
+    def test_match_case_with_default(self) -> None:
         parser = self.parse_text("""\
             for command in ["huh", "go home", "go n"]:
                 match command.split():
@@ -450,7 +454,7 @@ def test_match_case_with_default(self):
 class ParserFileTest(CoverageTest):
     """Tests for coverage.py's code parsing from files."""
 
-    def parse_file(self, filename):
+    def parse_file(self, filename: str) -> PythonParser:
         """Parse `text` as source, and return the `PythonParser` used."""
         parser = PythonParser(filename=filename, exclude="nocover")
         parser.parse_source()
@@ -459,7 +463,7 @@ def parse_file(self, filename):
     @pytest.mark.parametrize("slug, newline", [
         ("unix", "\n"), ("dos", "\r\n"), ("mac", "\r"),
     ])
-    def test_line_endings(self, slug, newline):
+    def test_line_endings(self, slug: str, newline: str) -> None:
         text = """\
             # check some basic branch counting
            class Foo:
@@ -478,14 +482,14 @@ class Bar:
         parser = self.parse_file(fname)
         assert parser.exit_counts() == counts, f"Wrong for {fname!r}"
 
-    def test_encoding(self):
+    def test_encoding(self) -> None:
         self.make_file("encoded.py", """\
             coverage = "\xe7\xf6v\xear\xe3g\xe9"
             """)
         parser = self.parse_file("encoded.py")
         assert parser.exit_counts() == {1: 1}
 
-    def test_missing_line_ending(self):
+    def test_missing_line_ending(self) -> None:
         # Test that the set of statements is the same even if a final
         # multi-line statement has no final newline.
         # https://github.com/nedbat/coveragepy/issues/293
@@ -514,7 +518,7 @@ def test_missing_line_ending(self):
         assert parser.statements == {1}
 
 
-def test_ast_dump():
+def test_ast_dump() -> None:
     # Run the AST_DUMP code to make sure it doesn't fail, with some light
     # assertions. Use parser.py as the test code since it is the longest file,
    # and fitting, since it's the AST_DUMP code.
@@ -531,7 +535,7 @@ def test_ast_dump():
            # stress_phystoken.tok has deprecation warnings, suppress them.
            warnings.filterwarnings("ignore", message=r".*invalid escape sequence",)
            ast_root = ast.parse(source)
-        result = []
+        result: List[str] = []
         ast_dump(ast_root, print=result.append)
         if num_lines < 100:
             continue
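Two details in the test_parser.py hunks go beyond mechanical signature changes: the new `from __future__ import annotations` import postpones evaluation of annotations (PEP 563), and the empty list in `test_ast_dump` becomes `result: List[str] = []`, since mypy cannot infer an element type from a bare `[]`. A minimal sketch of that second point, using a hypothetical `collect()` helper that is not part of the commit:

    from __future__ import annotations

    from typing import List


    def collect() -> List[str]:
        # A bare `result = []` would make mypy ask for a type annotation,
        # because nothing constrains the element type of an empty literal.
        result: List[str] = []
        result.append("ast node")
        return result


    assert collect() == ["ast node"]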

tests/test_phystokens.py

Lines changed: 17 additions & 17 deletions
@@ -58,7 +58,7 @@ class PhysTokensTest(CoverageTest):
 
     run_in_temp_dir = False
 
-    def check_tokenization(self, source):
+    def check_tokenization(self, source: str) -> None:
         """Tokenize `source`, then put it back together, should be the same."""
         tokenized = ""
         for line in source_token_lines(source):
@@ -71,26 +71,26 @@ def check_tokenization(self, source):
         tokenized = re.sub(r"(?m)[ \t]+$", "", tokenized)
         assert source == tokenized
 
-    def check_file_tokenization(self, fname):
+    def check_file_tokenization(self, fname: str) -> None:
         """Use the contents of `fname` for `check_tokenization`."""
         self.check_tokenization(get_python_source(fname))
 
-    def test_simple(self):
+    def test_simple(self) -> None:
         assert list(source_token_lines(SIMPLE)) == SIMPLE_TOKENS
         self.check_tokenization(SIMPLE)
 
-    def test_missing_final_newline(self):
+    def test_missing_final_newline(self) -> None:
         # We can tokenize source that is missing the final newline.
         assert list(source_token_lines(SIMPLE.rstrip())) == SIMPLE_TOKENS
 
-    def test_tab_indentation(self):
+    def test_tab_indentation(self) -> None:
         # Mixed tabs and spaces...
         assert list(source_token_lines(MIXED_WS)) == MIXED_WS_TOKENS
 
-    def test_bug_822(self):
+    def test_bug_822(self) -> None:
         self.check_tokenization(BUG_822)
 
-    def test_tokenize_real_file(self):
+    def test_tokenize_real_file(self) -> None:
         # Check the tokenization of a real file (large, btw).
         real_file = os.path.join(TESTS_DIR, "test_coverage.py")
         self.check_file_tokenization(real_file)
@@ -99,7 +99,7 @@ def test_tokenize_real_file(self):
         "stress_phystoken.tok",
         "stress_phystoken_dos.tok",
     ])
-    def test_stress(self, fname):
+    def test_stress(self, fname: str) -> None:
         # Check the tokenization of the stress-test files.
         # And check that those files haven't been incorrectly "fixed".
         with warnings.catch_warnings():
@@ -116,7 +116,7 @@ class SoftKeywordTest(CoverageTest):
 
     run_in_temp_dir = False
 
-    def test_soft_keywords(self):
+    def test_soft_keywords(self) -> None:
         source = textwrap.dedent("""\
             match re.match(something):
                 case ["what"]:
@@ -168,40 +168,40 @@ class SourceEncodingTest(CoverageTest):
 
     run_in_temp_dir = False
 
-    def test_detect_source_encoding(self):
+    def test_detect_source_encoding(self) -> None:
         for _, source, expected in ENCODING_DECLARATION_SOURCES:
             assert source_encoding(source) == expected, f"Wrong encoding in {source!r}"
 
-    def test_detect_source_encoding_not_in_comment(self):
+    def test_detect_source_encoding_not_in_comment(self) -> None:
         # Should not detect anything here
         source = b'def parse(src, encoding=None):\n pass'
         assert source_encoding(source) == DEF_ENCODING
 
-    def test_dont_detect_source_encoding_on_third_line(self):
+    def test_dont_detect_source_encoding_on_third_line(self) -> None:
         # A coding declaration doesn't count on the third line.
         source = b"\n\n# coding=cp850\n\n"
         assert source_encoding(source) == DEF_ENCODING
 
-    def test_detect_source_encoding_of_empty_file(self):
+    def test_detect_source_encoding_of_empty_file(self) -> None:
         # An important edge case.
         assert source_encoding(b"") == DEF_ENCODING
 
-    def test_bom(self):
+    def test_bom(self) -> None:
         # A BOM means utf-8.
         source = b"\xEF\xBB\xBFtext = 'hello'\n"
         assert source_encoding(source) == 'utf-8-sig'
 
-    def test_bom_with_encoding(self):
+    def test_bom_with_encoding(self) -> None:
         source = b"\xEF\xBB\xBF# coding: utf-8\ntext = 'hello'\n"
         assert source_encoding(source) == 'utf-8-sig'
 
-    def test_bom_is_wrong(self):
+    def test_bom_is_wrong(self) -> None:
         # A BOM with an explicit non-utf8 encoding is an error.
         source = b"\xEF\xBB\xBF# coding: cp850\n"
         with pytest.raises(SyntaxError, match="encoding problem: utf-8"):
             source_encoding(source)
 
-    def test_unknown_encoding(self):
+    def test_unknown_encoding(self) -> None:
         source = b"# coding: klingon\n"
         with pytest.raises(SyntaxError, match="unknown encoding: klingon"):
             source_encoding(source)
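The same convention extends to parametrized tests in both files: arguments injected by `@pytest.mark.parametrize` are annotated with the type of the supplied values (plain `str` here), and the test still returns `None`. A self-contained sketch of that shape, not taken from the commit's actual test bodies:

    from __future__ import annotations

    import pytest


    @pytest.mark.parametrize("slug, newline", [
        ("unix", "\n"), ("dos", "\r\n"), ("mac", "\r"),
    ])
    def test_line_endings_shape(slug: str, newline: str) -> None:
        # Parametrize values are plain strings, so the injected arguments
        # are annotated as str; the test function itself returns None.
        text = f"# {slug} sample{newline}a = 1{newline}"
        assert text.endswith(newline)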
