Skip to content

Commit 798b9d0

Browse files
authored
🔧 Move linting from flake8 to ruff (#268)
1 parent 9251695 commit 798b9d0

File tree

20 files changed

+141
-148
lines changed

20 files changed

+141
-148
lines changed

.pre-commit-config.yaml

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -33,14 +33,13 @@ repos:
3333
hooks:
3434
- id: black
3535

36-
- repo: https://github.com/PyCQA/flake8
37-
rev: 6.0.0
36+
- repo: https://github.com/charliermarsh/ruff-pre-commit
37+
rev: v0.0.270
3838
hooks:
39-
- id: flake8
40-
additional_dependencies: [flake8-bugbear~=22.7]
39+
- id: ruff
4140

4241
- repo: https://github.com/pre-commit/mirrors-mypy
43-
rev: v1.2.0
42+
rev: v1.3.0
4443
hooks:
4544
- id: mypy
4645
additional_dependencies: [mdurl]

docs/conf.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -121,7 +121,7 @@ def run_apidoc(app):
121121
shutil.rmtree(api_folder)
122122
os.mkdir(api_folder)
123123

124-
argv = ["-M", "--separate", "-o", api_folder, module_path] + ignore_paths
124+
argv = ["-M", "--separate", "-o", api_folder, module_path, *ignore_paths]
125125

126126
apidoc.OPTIONS.append("ignore-module-all")
127127
apidoc.main(argv)

markdown_it/common/normalize_url.py

Lines changed: 23 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
from __future__ import annotations
22

33
from collections.abc import Callable
4+
from contextlib import suppress
45
import re
56
from urllib.parse import quote, unquote, urlparse, urlunparse # noqa: F401
67

@@ -21,18 +22,17 @@ def normalizeLink(url: str) -> str:
2122
"""
2223
parsed = mdurl.parse(url, slashes_denote_host=True)
2324

24-
if parsed.hostname:
25-
# Encode hostnames in urls like:
26-
# `http://host/`, `https://host/`, `mailto:user@host`, `//host/`
27-
#
28-
# We don't encode unknown schemas, because it's likely that we encode
29-
# something we shouldn't (e.g. `skype:name` treated as `skype:host`)
30-
#
31-
if not parsed.protocol or parsed.protocol in RECODE_HOSTNAME_FOR:
32-
try:
33-
parsed = parsed._replace(hostname=_punycode.to_ascii(parsed.hostname))
34-
except Exception:
35-
pass
25+
# Encode hostnames in urls like:
26+
# `http://host/`, `https://host/`, `mailto:user@host`, `//host/`
27+
#
28+
# We don't encode unknown schemas, because it's likely that we encode
29+
# something we shouldn't (e.g. `skype:name` treated as `skype:host`)
30+
#
31+
if parsed.hostname and (
32+
not parsed.protocol or parsed.protocol in RECODE_HOSTNAME_FOR
33+
):
34+
with suppress(Exception):
35+
parsed = parsed._replace(hostname=_punycode.to_ascii(parsed.hostname))
3636

3737
return mdurl.encode(mdurl.format(parsed))
3838

@@ -47,18 +47,17 @@ def normalizeLinkText(url: str) -> str:
4747
"""
4848
parsed = mdurl.parse(url, slashes_denote_host=True)
4949

50-
if parsed.hostname:
51-
# Encode hostnames in urls like:
52-
# `http://host/`, `https://host/`, `mailto:user@host`, `//host/`
53-
#
54-
# We don't encode unknown schemas, because it's likely that we encode
55-
# something we shouldn't (e.g. `skype:name` treated as `skype:host`)
56-
#
57-
if not parsed.protocol or parsed.protocol in RECODE_HOSTNAME_FOR:
58-
try:
59-
parsed = parsed._replace(hostname=_punycode.to_unicode(parsed.hostname))
60-
except Exception:
61-
pass
50+
# Encode hostnames in urls like:
51+
# `http://host/`, `https://host/`, `mailto:user@host`, `//host/`
52+
#
53+
# We don't encode unknown schemas, because it's likely that we encode
54+
# something we shouldn't (e.g. `skype:name` treated as `skype:host`)
55+
#
56+
if parsed.hostname and (
57+
not parsed.protocol or parsed.protocol in RECODE_HOSTNAME_FOR
58+
):
59+
with suppress(Exception):
60+
parsed = parsed._replace(hostname=_punycode.to_unicode(parsed.hostname))
6261

6362
# add '%' to exclude list because of https://github.com/markdown-it/markdown-it/issues/720
6463
return mdurl.decode(mdurl.format(parsed), mdurl.DECODE_DEFAULT_CHARS + "%")

markdown_it/main.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -4,11 +4,11 @@
44
from contextlib import contextmanager
55
from typing import Any, Literal, overload
66

7-
from . import helpers, presets # noqa F401
8-
from .common import normalize_url, utils # noqa F401
9-
from .parser_block import ParserBlock # noqa F401
10-
from .parser_core import ParserCore # noqa F401
11-
from .parser_inline import ParserInline # noqa F401
7+
from . import helpers, presets
8+
from .common import normalize_url, utils
9+
from .parser_block import ParserBlock
10+
from .parser_core import ParserCore
11+
from .parser_inline import ParserInline
1212
from .renderer import RendererHTML, RendererProtocol
1313
from .rules_core.state_core import StateCore
1414
from .token import Token

markdown_it/presets/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
js_default = default
77

88

9-
class gfm_like:
9+
class gfm_like: # noqa: N801
1010
"""GitHub Flavoured Markdown (GFM) like.
1111
1212
This adds the linkify, table and strikethrough components to CommonMark.

markdown_it/renderer.py

Lines changed: 12 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -152,19 +152,18 @@ def renderToken(
152152
if token.block:
153153
needLf = True
154154

155-
if token.nesting == 1:
156-
if idx + 1 < len(tokens):
157-
nextToken = tokens[idx + 1]
158-
159-
if nextToken.type == "inline" or nextToken.hidden:
160-
# Block-level tag containing an inline tag.
161-
#
162-
needLf = False
163-
164-
elif nextToken.nesting == -1 and nextToken.tag == token.tag:
165-
# Opening tag + closing tag of the same type. E.g. `<li></li>`.
166-
#
167-
needLf = False
155+
if token.nesting == 1 and (idx + 1 < len(tokens)):
156+
nextToken = tokens[idx + 1]
157+
158+
if nextToken.type == "inline" or nextToken.hidden: # noqa: SIM114
159+
# Block-level tag containing an inline tag.
160+
#
161+
needLf = False
162+
163+
elif nextToken.nesting == -1 and nextToken.tag == token.tag:
164+
# Opening tag + closing tag of the same type. E.g. `<li></li>`.
165+
#
166+
needLf = False
168167

169168
result += ">\n" if needLf else ">"
170169

markdown_it/ruler.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ class Ruler
3030

3131

3232
class StateBase:
33-
srcCharCode: tuple[int, ...]
33+
srcCharCode: tuple[int, ...] # noqa: N815
3434

3535
def __init__(self, src: str, md: MarkdownIt, env: EnvType):
3636
self.src = src

markdown_it/rules_block/fence.py

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -38,9 +38,8 @@ def fence(state: StateBlock, startLine: int, endLine: int, silent: bool) -> bool
3838
params = state.src[pos:maximum]
3939

4040
# /* ` */
41-
if marker == 0x60:
42-
if chr(marker) in params:
43-
return False
41+
if marker == 0x60 and chr(marker) in params:
42+
return False
4443

4544
# Since start is found, we can report success here in validation mode
4645
if silent:

markdown_it/rules_block/list.py

Lines changed: 18 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -120,14 +120,17 @@ def list_block(state: StateBlock, startLine: int, endLine: int, silent: bool) ->
120120

121121
# limit conditions when list can interrupt
122122
# a paragraph (validation mode only)
123-
if silent and state.parentType == "paragraph":
124-
# Next list item should still terminate previous list item
125-
#
126-
# This code can fail if plugins use blkIndent as well as lists,
127-
# but I hope the spec gets fixed long before that happens.
128-
#
129-
if state.tShift[startLine] >= state.blkIndent:
130-
isTerminatingParagraph = True
123+
# Next list item should still terminate previous list item
124+
#
125+
# This code can fail if plugins use blkIndent as well as lists,
126+
# but I hope the spec gets fixed long before that happens.
127+
#
128+
if (
129+
silent
130+
and state.parentType == "paragraph"
131+
and state.tShift[startLine] >= state.blkIndent
132+
):
133+
isTerminatingParagraph = True
131134

132135
# Detect list type and position after marker
133136
posAfterMarker = skipOrderedListMarker(state, startLine)
@@ -149,9 +152,11 @@ def list_block(state: StateBlock, startLine: int, endLine: int, silent: bool) ->
149152

150153
# If we're starting a new unordered list right after
151154
# a paragraph, first line should not be empty.
152-
if isTerminatingParagraph:
153-
if state.skipSpaces(posAfterMarker) >= state.eMarks[startLine]:
154-
return False
155+
if (
156+
isTerminatingParagraph
157+
and state.skipSpaces(posAfterMarker) >= state.eMarks[startLine]
158+
):
159+
return False
155160

156161
# We should terminate list on style change. Remember first one to compare.
157162
markerCharCode = state.srcCharCode[posAfterMarker - 1]
@@ -209,11 +214,8 @@ def list_block(state: StateBlock, startLine: int, endLine: int, silent: bool) ->
209214

210215
contentStart = pos
211216

212-
if contentStart >= maximum:
213-
# trimming space in "- \n 3" case, indent is 1 here
214-
indentAfterMarker = 1
215-
else:
216-
indentAfterMarker = offset - initial
217+
# trimming space in "- \n 3" case, indent is 1 here
218+
indentAfterMarker = 1 if contentStart >= maximum else offset - initial
217219

218220
# If we have more than 4 spaces, the indent is 1
219221
# (the rest is just indented code block)

markdown_it/rules_block/reference.py

Lines changed: 11 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -153,18 +153,17 @@ def reference(state: StateBlock, startLine: int, _endLine: int, silent: bool) ->
153153
break
154154
pos += 1
155155

156-
if pos < maximum and charCodeAt(string, pos) != 0x0A:
157-
if title:
158-
# garbage at the end of the line after title,
159-
# but it could still be a valid reference if we roll back
160-
title = ""
161-
pos = destEndPos
162-
lines = destEndLineNo
163-
while pos < maximum:
164-
ch = charCodeAt(string, pos)
165-
if not isSpace(ch):
166-
break
167-
pos += 1
156+
if pos < maximum and charCodeAt(string, pos) != 0x0A and title:
157+
# garbage at the end of the line after title,
158+
# but it could still be a valid reference if we roll back
159+
title = ""
160+
pos = destEndPos
161+
lines = destEndLineNo
162+
while pos < maximum:
163+
ch = charCodeAt(string, pos)
164+
if not isSpace(ch):
165+
break
166+
pos += 1
168167

169168
if pos < maximum and charCodeAt(string, pos) != 0x0A:
170169
# garbage at the end of the line

markdown_it/rules_block/state_block.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -202,10 +202,11 @@ def getLines(self, begin: int, end: int, indent: int, keepLastLF: bool) -> str:
202202
while line < end:
203203
lineIndent = 0
204204
lineStart = first = self.bMarks[line]
205-
if line + 1 < end or keepLastLF:
206-
last = self.eMarks[line] + 1
207-
else:
208-
last = self.eMarks[line]
205+
last = (
206+
self.eMarks[line] + 1
207+
if line + 1 < end or keepLastLF
208+
else self.eMarks[line]
209+
)
209210

210211
while (first < last) and (lineIndent < indent):
211212
ch = self.srcCharCode[first]

markdown_it/rules_core/replacements.py

Lines changed: 24 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -78,29 +78,30 @@ def replace_rare(inlineTokens: list[Token]) -> None:
7878
inside_autolink = 0
7979

8080
for token in inlineTokens:
81-
if token.type == "text" and not inside_autolink:
82-
if RARE_RE.search(token.content):
83-
# +- -> ±
84-
token.content = PLUS_MINUS_RE.sub("±", token.content)
85-
86-
# .., ..., ....... -> …
87-
token.content = ELLIPSIS_RE.sub("…", token.content)
88-
89-
# but ?..... & !..... -> ?.. & !..
90-
token.content = ELLIPSIS_QUESTION_EXCLAMATION_RE.sub(
91-
"\\1..", token.content
92-
)
93-
token.content = QUESTION_EXCLAMATION_RE.sub("\\1\\1\\1", token.content)
94-
95-
# ,, ,,, ,,,, -> ,
96-
token.content = COMMA_RE.sub(",", token.content)
97-
98-
# em-dash
99-
token.content = EM_DASH_RE.sub("\\1\u2014", token.content)
100-
101-
# en-dash
102-
token.content = EN_DASH_RE.sub("\\1\u2013", token.content)
103-
token.content = EN_DASH_INDENT_RE.sub("\\1\u2013", token.content)
81+
if (
82+
token.type == "text"
83+
and (not inside_autolink)
84+
and RARE_RE.search(token.content)
85+
):
86+
# +- -> ±
87+
token.content = PLUS_MINUS_RE.sub("±", token.content)
88+
89+
# .., ..., ....... -> …
90+
token.content = ELLIPSIS_RE.sub("…", token.content)
91+
92+
# but ?..... & !..... -> ?.. & !..
93+
token.content = ELLIPSIS_QUESTION_EXCLAMATION_RE.sub("\\1..", token.content)
94+
token.content = QUESTION_EXCLAMATION_RE.sub("\\1\\1\\1", token.content)
95+
96+
# ,, ,,, ,,,, -> ,
97+
token.content = COMMA_RE.sub(",", token.content)
98+
99+
# em-dash
100+
token.content = EM_DASH_RE.sub("\\1\u2014", token.content)
101+
102+
# en-dash
103+
token.content = EN_DASH_RE.sub("\\1\u2013", token.content)
104+
token.content = EN_DASH_INDENT_RE.sub("\\1\u2013", token.content)
104105

105106
if token.type == "link_open" and token.info == "auto":
106107
inside_autolink -= 1

markdown_it/rules_core/smartquotes.py

Lines changed: 7 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -100,19 +100,17 @@ def process_inlines(tokens: list[Token], state: StateCore) -> None:
100100
isLastWhiteSpace = isWhiteSpace(lastChar)
101101
isNextWhiteSpace = isWhiteSpace(nextChar)
102102

103-
if isNextWhiteSpace:
103+
if isNextWhiteSpace: # noqa: SIM114
104+
canOpen = False
105+
elif isNextPunctChar and not (isLastWhiteSpace or isLastPunctChar):
104106
canOpen = False
105-
elif isNextPunctChar:
106-
if not (isLastWhiteSpace or isLastPunctChar):
107-
canOpen = False
108107

109-
if isLastWhiteSpace:
108+
if isLastWhiteSpace: # noqa: SIM114
109+
canClose = False
110+
elif isLastPunctChar and not (isNextWhiteSpace or isNextPunctChar):
110111
canClose = False
111-
elif isLastPunctChar:
112-
if not (isNextWhiteSpace or isNextPunctChar):
113-
canClose = False
114112

115-
if nextChar == 0x22 and t.group(0) == '"': # 0x22: "
113+
if nextChar == 0x22 and t.group(0) == '"': # 0x22: " # noqa: SIM102
116114
if lastChar >= 0x30 and lastChar <= 0x39: # 0x30: 0, 0x39: 9
117115
# special case: 1"" - count first quote as an inch
118116
canClose = canOpen = False

markdown_it/rules_inline/balance_pairs.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -60,10 +60,12 @@ def processDelimiters(state: StateInline, delimiters: list[Delimiter]) -> None:
6060
# closing delimiters must not be a multiple of 3 unless both lengths
6161
# are multiples of 3.
6262
#
63-
if opener.close or closer.open:
64-
if (opener.length + closer.length) % 3 == 0:
65-
if opener.length % 3 != 0 or closer.length % 3 != 0:
66-
isOddMatch = True
63+
if (
64+
(opener.close or closer.open)
65+
and ((opener.length + closer.length) % 3 == 0)
66+
and (opener.length % 3 != 0 or closer.length % 3 != 0)
67+
):
68+
isOddMatch = True
6769

6870
if not isOddMatch:
6971
# If previous delimiter cannot be an opener, we can safely skip

markdown_it/rules_inline/entity.py

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -40,12 +40,11 @@ def entity(state: StateInline, silent: bool) -> bool:
4040

4141
else:
4242
match = NAMED_RE.search(state.src[pos:])
43-
if match:
44-
if match.group(1) in entities:
45-
if not silent:
46-
state.pending += entities[match.group(1)]
47-
state.pos += len(match.group(0))
48-
return True
43+
if match and match.group(1) in entities:
44+
if not silent:
45+
state.pending += entities[match.group(1)]
46+
state.pos += len(match.group(0))
47+
return True
4948

5049
if not silent:
5150
state.pending += "&"

0 commit comments

Comments
 (0)