From e2026b92f37d3a4b995cd04a1c8cf8f2ebddd428 Mon Sep 17 00:00:00 2001
From: Adam Turner <9087854+aa-turner@users.noreply.github.com>
Date: Fri, 19 Jul 2024 10:46:46 +0100
Subject: [PATCH 01/12] GH-121970: Use Ruff to check and format the docs tools
---
.pre-commit-config.yaml | 4 +
Doc/.ruff.toml | 55 ++++++++
Doc/conf.py | 120 +++++++++-------
Doc/tools/check-warnings.py | 163 ++++++++++------------
Doc/tools/extensions/glossary_search.py | 7 +-
Doc/tools/extensions/lexers/__init__.py | 10 +-
Doc/tools/extensions/lexers/asdl_lexer.py | 36 ++---
Doc/tools/extensions/lexers/peg_lexer.py | 72 +++++-----
8 files changed, 266 insertions(+), 201 deletions(-)
create mode 100644 Doc/.ruff.toml
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index fde9d9149bf62b..a444c56c133827 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -2,6 +2,10 @@ repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.3.4
hooks:
+ - id: ruff
+ name: Run Ruff on Doc/
+ args: [--exit-non-zero-on-fix]
+ files: ^Doc/
- id: ruff
name: Run Ruff on Lib/test/
args: [--exit-non-zero-on-fix]
diff --git a/Doc/.ruff.toml b/Doc/.ruff.toml
new file mode 100644
index 00000000000000..2af7e6bd9d6050
--- /dev/null
+++ b/Doc/.ruff.toml
@@ -0,0 +1,55 @@
+target-version = "py312" # align with the version in oldest_supported_sphinx
+fix = true
+output-format = "full"
+extend-exclude = [
+ "includes/*",
+ "tools/extensions/c_annotations.py",
+ "tools/extensions/escape4chm.py",
+ "tools/extensions/patchlevel.py",
+ "tools/extensions/pyspecific.py",
+]
+
+[lint]
+preview = true
+select = [
+ "C4", # flake8-comprehensions
+ "B", # flake8-bugbear
+ "E", # pycodestyle
+ "EM", # flake8-errmsg
+ "F", # pyflakes
+ "FA", # flake8-future-annotations
+ "FLY", # flynt
+ "FURB",# refurb
+ "G", # flake8-logging-format
+ "I", # isort
+ "LOG", # flake8-logging
+ "N", # pep8-naming
+ "PERF",# perflint
+ "PGH", # pygrep-hooks
+ "PT", # flake8-pytest-style
+ "TCH", # flake8-type-checking
+ "UP", # pyupgrade
+ "W", # pycodestyle
+]
+
+[lint.per-file-ignores]
+"tools/extensions/lexers/peg_lexer.py" =[
+ "E501",
+]
+"tools/extensions/c_annotations.py" =[
+ "E501",
+]
+"tools/extensions/pyspecific.py" =[
+ "E501",
+]
+
+[format]
+preview = true
+quote-style = "single"
+exclude = [
+ "tools/extensions/lexers/*",
+ "tools/extensions/c_annotations.py",
+ "tools/extensions/escape4chm.py",
+ "tools/extensions/patchlevel.py",
+ "tools/extensions/pyspecific.py",
+]
diff --git a/Doc/conf.py b/Doc/conf.py
index 5addee0378984a..7c6201a123d5b5 100644
--- a/Doc/conf.py
+++ b/Doc/conf.py
@@ -9,6 +9,7 @@
import os
import sys
import time
+
sys.path.append(os.path.abspath('tools/extensions'))
sys.path.append(os.path.abspath('includes'))
@@ -30,20 +31,20 @@
# Skip if downstream redistributors haven't installed them
try:
- import notfound.extension
+ import notfound.extension # NoQA: F401
except ImportError:
pass
else:
extensions.append('notfound.extension')
try:
- import sphinxext.opengraph
+ import sphinxext.opengraph # NoQA: F401
except ImportError:
pass
else:
extensions.append('sphinxext.opengraph')
-doctest_global_setup = '''
+doctest_global_setup = """
try:
import _tkinter
except ImportError:
@@ -53,7 +54,7 @@
import warnings
warnings.simplefilter('error')
del warnings
-'''
+"""
manpages_url = 'https://manpages.debian.org/{path}'
@@ -63,7 +64,8 @@
# We look for the Include/patchlevel.h file in the current Python source tree
# and replace the values accordingly.
-import patchlevel
+import patchlevel # NoQA: E402
+
version, release = patchlevel.get_version_info()
rst_epilog = f"""
@@ -298,7 +300,8 @@
# Disable Docutils smartquotes for several translations
smartquotes_excludes = {
- 'languages': ['ja', 'fr', 'zh_TW', 'zh_CN'], 'builders': ['man', 'text'],
+ 'languages': ['ja', 'fr', 'zh_TW', 'zh_CN'],
+ 'builders': ['man', 'text'],
}
# Avoid a warning with Sphinx >= 4.0
@@ -319,30 +322,32 @@
'collapsiblesidebar': True,
'issues_url': '/bugs.html',
'license_url': '/license.html',
- 'root_include_title': False # We use the version switcher instead.
+ 'root_include_title': False, # We use the version switcher instead.
}
-if os.getenv("READTHEDOCS"):
- html_theme_options["hosted_on"] = 'Read the Docs'
+if os.getenv('READTHEDOCS'):
+ html_theme_options['hosted_on'] = (
+ 'Read the Docs'
+ )
# Override stylesheet fingerprinting for Windows CHM htmlhelp to fix GH-91207
# https://github.com/python/cpython/issues/91207
if any('htmlhelp' in arg for arg in sys.argv):
html_style = 'pydoctheme.css'
- print("\nWARNING: Windows CHM Help is no longer supported.")
- print("It may be removed in the future\n")
+ print('\nWARNING: Windows CHM Help is no longer supported.')
+ print('It may be removed in the future\n')
# Short title used e.g. for <title> HTML tags.
html_short_title = f'{release} Documentation'
# Deployment preview information
# (See .readthedocs.yml and https://docs.readthedocs.io/en/stable/reference/environment-variables.html)
-repository_url = os.getenv("READTHEDOCS_GIT_CLONE_URL")
+repository_url = os.getenv('READTHEDOCS_GIT_CLONE_URL')
html_context = {
- "is_deployment_preview": os.getenv("READTHEDOCS_VERSION_TYPE") == "external",
- "repository_url": repository_url.removesuffix(".git") if repository_url else None,
- "pr_id": os.getenv("READTHEDOCS_VERSION"),
- "enable_analytics": os.getenv("PYTHON_DOCS_ENABLE_ANALYTICS"),
+ 'is_deployment_preview': os.getenv('READTHEDOCS_VERSION_TYPE') == 'external',
+ 'repository_url': repository_url.removesuffix('.git') if repository_url else None,
+ 'pr_id': os.getenv('READTHEDOCS_VERSION'),
+ 'enable_analytics': os.getenv('PYTHON_DOCS_ENABLE_ANALYTICS'),
}
# This 'Last updated on:' timestamp is inserted at the bottom of every page.
@@ -388,7 +393,7 @@
latex_elements = {
# For the LaTeX preamble.
- 'preamble': r'''
+ 'preamble': r"""
\authoraddress{
\sphinxstrong{Python Software Foundation}\\
Email: \sphinxemail{docs@python.org}
@@ -396,7 +401,7 @@
\let\Verbatim=\OriginalVerbatim
\let\endVerbatim=\endOriginalVerbatim
\setcounter{tocdepth}{2}
-''',
+""",
# The paper size ('letter' or 'a4').
'papersize': 'a4',
# The font size ('10pt', '11pt' or '12pt').
@@ -407,30 +412,52 @@
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'Guido van Rossum and the Python development team'
latex_documents = [
- ('c-api/index', 'c-api.tex',
- 'The Python/C API', _stdauthor, 'manual'),
- ('extending/index', 'extending.tex',
- 'Extending and Embedding Python', _stdauthor, 'manual'),
- ('installing/index', 'installing.tex',
- 'Installing Python Modules', _stdauthor, 'manual'),
- ('library/index', 'library.tex',
- 'The Python Library Reference', _stdauthor, 'manual'),
- ('reference/index', 'reference.tex',
- 'The Python Language Reference', _stdauthor, 'manual'),
- ('tutorial/index', 'tutorial.tex',
- 'Python Tutorial', _stdauthor, 'manual'),
- ('using/index', 'using.tex',
- 'Python Setup and Usage', _stdauthor, 'manual'),
- ('faq/index', 'faq.tex',
- 'Python Frequently Asked Questions', _stdauthor, 'manual'),
- ('whatsnew/' + version, 'whatsnew.tex',
- 'What\'s New in Python', 'A. M. Kuchling', 'howto'),
+ ('c-api/index', 'c-api.tex', 'The Python/C API', _stdauthor, 'manual'),
+ (
+ 'extending/index',
+ 'extending.tex',
+ 'Extending and Embedding Python',
+ _stdauthor,
+ 'manual',
+ ),
+ (
+ 'installing/index',
+ 'installing.tex',
+ 'Installing Python Modules',
+ _stdauthor,
+ 'manual',
+ ),
+ (
+ 'library/index',
+ 'library.tex',
+ 'The Python Library Reference',
+ _stdauthor,
+ 'manual',
+ ),
+ (
+ 'reference/index',
+ 'reference.tex',
+ 'The Python Language Reference',
+ _stdauthor,
+ 'manual',
+ ),
+ ('tutorial/index', 'tutorial.tex', 'Python Tutorial', _stdauthor, 'manual'),
+ ('using/index', 'using.tex', 'Python Setup and Usage', _stdauthor, 'manual'),
+ ('faq/index', 'faq.tex', 'Python Frequently Asked Questions', _stdauthor, 'manual'),
+ (
+ 'whatsnew/' + version,
+ 'whatsnew.tex',
+ "What's New in Python",
+ 'A. M. Kuchling',
+ 'howto',
+ ),
]
# Collect all HOWTOs individually
-latex_documents.extend(('howto/' + fn[:-4], 'howto-' + fn[:-4] + '.tex',
- '', _stdauthor, 'howto')
- for fn in os.listdir('howto')
- if fn.endswith('.rst') and fn != 'index.rst')
+latex_documents.extend(
+ ('howto/' + fn[:-4], 'howto-' + fn[:-4] + '.tex', '', _stdauthor, 'howto')
+ for fn in os.listdir('howto')
+ if fn.endswith('.rst') and fn != 'index.rst'
+)
# Documents to append as an appendix to all manuals.
latex_appendices = ['glossary', 'about', 'license', 'copyright']
@@ -458,8 +485,7 @@
'test($|_)',
]
-coverage_ignore_classes = [
-]
+coverage_ignore_classes = []
# Glob patterns for C source files for C API coverage, relative to this directory.
coverage_c_path = [
@@ -476,7 +502,7 @@
# The coverage checker will ignore all C items whose names match these regexes
# (using re.match) -- the keys must be the same as in coverage_c_regexes.
coverage_ignore_c_items = {
-# 'cfunction': [...]
+ # 'cfunction': [...]
}
@@ -534,10 +560,10 @@
# mapping unique short aliases to a base URL and a prefix.
# https://www.sphinx-doc.org/en/master/usage/extensions/extlinks.html
extlinks = {
- "cve": ("https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-%s", "CVE-%s"),
- "cwe": ("https://cwe.mitre.org/data/definitions/%s.html", "CWE-%s"),
- "pypi": ("https://pypi.org/project/%s/", "%s"),
- "source": (SOURCE_URI, "%s"),
+ 'cve': ('https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-%s', 'CVE-%s'),
+ 'cwe': ('https://cwe.mitre.org/data/definitions/%s.html', 'CWE-%s'),
+ 'pypi': ('https://pypi.org/project/%s/', '%s'),
+ 'source': (SOURCE_URI, '%s'),
}
extlinks_detect_hardcoded_links = True
diff --git a/Doc/tools/check-warnings.py b/Doc/tools/check-warnings.py
index 67623b83d3a67d..b098d06a00b1c5 100644
--- a/Doc/tools/check-warnings.py
+++ b/Doc/tools/check-warnings.py
@@ -2,6 +2,7 @@
"""
Check the output of running Sphinx in nit-picky mode (missing references).
"""
+
from __future__ import annotations
import argparse
@@ -19,48 +20,48 @@
# Exclude these whether they're dirty or clean,
# because they trigger a rebuild of dirty files.
EXCLUDE_FILES = {
- "Doc/whatsnew/changelog.rst",
+ 'Doc/whatsnew/changelog.rst',
}
# Subdirectories of Doc/ to exclude.
EXCLUDE_SUBDIRS = {
- ".env",
- ".venv",
- "env",
- "includes",
- "venv",
+ '.env',
+ '.venv',
+ 'env',
+ 'includes',
+ 'venv',
}
# Regex pattern to match the parts of a Sphinx warning
WARNING_PATTERN = re.compile(
- r"(?P<file>([A-Za-z]:[\\/])?[^:]+):(?P<line>\d+): WARNING: (?P<msg>.+)"
+ r'(?P<file>([A-Za-z]:[\\/])?[^:]+):(?P<line>\d+): WARNING: (?P<msg>.+)'
)
# Regex pattern to match the line numbers in a Git unified diff
DIFF_PATTERN = re.compile(
- r"^@@ -(?P<linea>\d+)(?:,(?P<removed>\d+))? \+(?P<lineb>\d+)(?:,(?P<added>\d+))? @@",
+ r'^@@ -(?P<linea>\d+)(?:,(?P<removed>\d+))? \+(?P<lineb>\d+)(?:,(?P<added>\d+))? @@', # NoQA: E501
flags=re.MULTILINE,
)
-def get_diff_files(ref_a: str, ref_b: str, filter_mode: str = "") -> set[Path]:
+def get_diff_files(ref_a: str, ref_b: str, filter_mode: str = '') -> set[Path]:
"""List the files changed between two Git refs, filtered by change type."""
added_files_result = subprocess.run(
[
- "git",
- "diff",
- f"--diff-filter={filter_mode}",
- "--name-only",
- f"{ref_a}...{ref_b}",
- "--",
+ 'git',
+ 'diff',
+ f'--diff-filter={filter_mode}',
+ '--name-only',
+ f'{ref_a}...{ref_b}',
+ '--',
],
stdout=subprocess.PIPE,
check=True,
text=True,
- encoding="UTF-8",
+ encoding='UTF-8',
)
- added_files = added_files_result.stdout.strip().split("\n")
+ added_files = added_files_result.stdout.strip().split('\n')
return {Path(file.strip()) for file in added_files if file.strip()}
@@ -68,32 +69,28 @@ def get_diff_lines(ref_a: str, ref_b: str, file: Path) -> list[int]:
"""List the lines changed between two Git refs for a specific file."""
diff_output = subprocess.run(
[
- "git",
- "diff",
- "--unified=0",
- f"{ref_a}...{ref_b}",
- "--",
+ 'git',
+ 'diff',
+ '--unified=0',
+ f'{ref_a}...{ref_b}',
+ '--',
str(file),
],
stdout=subprocess.PIPE,
check=True,
text=True,
- encoding="UTF-8",
+ encoding='UTF-8',
)
# Scrape line offsets + lengths from diff and convert to line numbers
line_matches = DIFF_PATTERN.finditer(diff_output.stdout)
# Removed and added line counts are 1 if not printed
- line_match_values = [
- line_match.groupdict(default=1) for line_match in line_matches
- ]
+ line_match_values = [line_match.groupdict(default=1) for line_match in line_matches]
line_ints = [
- (int(match_value["lineb"]), int(match_value["added"]))
+ (int(match_value['lineb']), int(match_value['added']))
for match_value in line_match_values
]
- line_ranges = [
- range(line_b, line_b + added) for line_b, added in line_ints
- ]
+ line_ranges = [range(line_b, line_b + added) for line_b, added in line_ints]
line_numbers = list(itertools.chain(*line_ranges))
return line_numbers
@@ -118,13 +115,10 @@ def filter_and_parse_warnings(
) -> list[re.Match[str]]:
"""Get the warnings matching passed files and parse them with regex."""
filtered_warnings = [
- warning
- for warning in warnings
- if any(str(file) in warning for file in files)
+ warning for warning in warnings if any(str(file) in warning for file in files)
]
warning_matches = [
- WARNING_PATTERN.fullmatch(warning.strip())
- for warning in filtered_warnings
+ WARNING_PATTERN.fullmatch(warning.strip()) for warning in filtered_warnings
]
non_null_matches = [warning for warning in warning_matches if warning]
return non_null_matches
@@ -135,21 +129,17 @@ def filter_warnings_by_diff(
) -> list[re.Match[str]]:
"""Filter the passed per-file warnings to just those on changed lines."""
diff_lines = get_diff_lines(ref_a, ref_b, file)
- with file.open(encoding="UTF-8") as file_obj:
+ with file.open(encoding='UTF-8') as file_obj:
paragraphs = get_para_line_numbers(file_obj)
touched_paras = [
- para_lines
- for para_lines in paragraphs
- if set(diff_lines) & set(para_lines)
+ para_lines for para_lines in paragraphs if set(diff_lines) & set(para_lines)
]
touched_para_lines = set(itertools.chain(*touched_paras))
- warnings_infile = [
- warning for warning in warnings if str(file) in warning["file"]
- ]
+ warnings_infile = [warning for warning in warnings if str(file) in warning['file']]
warnings_touched = [
warning
for warning in warnings_infile
- if int(warning["line"]) in touched_para_lines
+ if int(warning['line']) in touched_para_lines
]
return warnings_touched
@@ -159,7 +149,7 @@ def process_touched_warnings(
) -> list[re.Match[str]]:
"""Filter a list of Sphinx warnings to those affecting touched lines."""
added_files, modified_files = tuple(
- get_diff_files(ref_a, ref_b, filter_mode=mode) for mode in ("A", "M")
+ get_diff_files(ref_a, ref_b, filter_mode=mode) for mode in ('A', 'M')
)
warnings_added = filter_and_parse_warnings(warnings, added_files)
@@ -168,7 +158,7 @@ def process_touched_warnings(
modified_files_warned = {
file
for file in modified_files
- if any(str(file) in warning["file"] for warning in warnings_modified)
+ if any(str(file) in warning['file'] for warning in warnings_modified)
}
warnings_modified_touched = [
@@ -183,7 +173,7 @@ def process_touched_warnings(
def annotate_diff(
- warnings: list[str], ref_a: str = "main", ref_b: str = "HEAD"
+ warnings: list[str], ref_a: str = 'main', ref_b: str = 'HEAD'
) -> None:
"""
Convert Sphinx warning messages to GitHub Actions for changed paragraphs.
@@ -197,12 +187,12 @@ def annotate_diff(
https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-a-warning-message
"""
warnings_touched = process_touched_warnings(warnings, ref_a, ref_b)
- print("Emitting doc warnings matching modified lines:")
+ print('Emitting doc warnings matching modified lines:')
for warning in warnings_touched:
- print("::warning file={file},line={line}::{msg}".format_map(warning))
+ print('::warning file={file},line={line}::{msg}'.format_map(warning))
print(warning[0])
if not warnings_touched:
- print("None")
+ print('None')
def fail_if_regression(
@@ -214,19 +204,19 @@ def fail_if_regression(
"""
all_rst = {
str(rst)
- for rst in Path("Doc/").rglob("*.rst")
+ for rst in Path('Doc/').rglob('*.rst')
if rst.parts[1] not in EXCLUDE_SUBDIRS
}
should_be_clean = all_rst - files_with_expected_nits - EXCLUDE_FILES
problem_files = sorted(should_be_clean & files_with_nits)
if problem_files:
- print("\nError: must not contain warnings:\n")
+ print('\nError: must not contain warnings:\n')
for filename in problem_files:
print(filename)
for warning in warnings:
if filename in warning:
if match := WARNING_PATTERN.fullmatch(warning):
- print(" {line}: {msg}".format_map(match))
+ print(' {line}: {msg}'.format_map(match))
return -1
return 0
@@ -240,10 +230,10 @@ def fail_if_improved(
"""
files_with_no_nits = files_with_expected_nits - files_with_nits
if files_with_no_nits:
- print("\nCongratulations! You improved:\n")
+ print('\nCongratulations! You improved:\n')
for filename in sorted(files_with_no_nits):
print(filename)
- print("\nPlease remove from Doc/tools/.nitignore\n")
+ print('\nPlease remove from Doc/tools/.nitignore\n')
return -1
return 0
@@ -252,21 +242,13 @@ def fail_if_new_news_nit(warnings: list[str], threshold: int) -> int:
"""
Ensure no warnings are found in the NEWS file before a given line number.
"""
- news_nits = (
- warning
- for warning in warnings
- if "/build/NEWS:" in warning
- )
+ news_nits = (warning for warning in warnings if '/build/NEWS:' in warning)
# Nits found before the threshold line
- new_news_nits = [
- nit
- for nit in news_nits
- if int(nit.split(":")[1]) <= threshold
- ]
+ new_news_nits = [nit for nit in news_nits if int(nit.split(':')[1]) <= threshold]
if new_news_nits:
- print("\nError: new NEWS nits:\n")
+ print('\nError: new NEWS nits:\n')
for warning in new_news_nits:
print(warning)
return -1
@@ -277,57 +259,58 @@ def fail_if_new_news_nit(warnings: list[str], threshold: int) -> int:
def main(argv: list[str] | None = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument(
- "--annotate-diff",
- nargs="*",
- metavar=("BASE_REF", "HEAD_REF"),
- help="Add GitHub Actions annotations on the diff for warnings on "
- "lines changed between the given refs (main and HEAD, by default)",
+ '--annotate-diff',
+ nargs='*',
+ metavar=('BASE_REF', 'HEAD_REF'),
+ help='Add GitHub Actions annotations on the diff for warnings on '
+ 'lines changed between the given refs (main and HEAD, by default)',
)
parser.add_argument(
- "--fail-if-regression",
- action="store_true",
- help="Fail if known-good files have warnings",
+ '--fail-if-regression',
+ action='store_true',
+ help='Fail if known-good files have warnings',
)
parser.add_argument(
- "--fail-if-improved",
- action="store_true",
- help="Fail if new files with no nits are found",
+ '--fail-if-improved',
+ action='store_true',
+ help='Fail if new files with no nits are found',
)
parser.add_argument(
- "--fail-if-new-news-nit",
- metavar="threshold",
+ '--fail-if-new-news-nit',
+ metavar='threshold',
type=int,
- nargs="?",
+ nargs='?',
const=NEWS_NIT_THRESHOLD,
- help="Fail if new NEWS nit found before threshold line number",
+ help='Fail if new NEWS nit found before threshold line number',
)
args = parser.parse_args(argv)
if args.annotate_diff is not None and len(args.annotate_diff) > 2:
parser.error(
- "--annotate-diff takes between 0 and 2 ref args, not "
- f"{len(args.annotate_diff)} {tuple(args.annotate_diff)}"
+ '--annotate-diff takes between 0 and 2 ref args, not '
+ f'{len(args.annotate_diff)} {tuple(args.annotate_diff)}'
)
exit_code = 0
- wrong_directory_msg = "Must run this script from the repo root"
- assert Path("Doc").exists() and Path("Doc").is_dir(), wrong_directory_msg
+ wrong_directory_msg = 'Must run this script from the repo root'
+ if not Path('Doc').exists() or not Path('Doc').is_dir():
+ raise RuntimeError(wrong_directory_msg)
- with Path("Doc/sphinx-warnings.txt").open(encoding="UTF-8") as f:
+ with Path('Doc/sphinx-warnings.txt').open(encoding='UTF-8') as f:
warnings = f.read().splitlines()
cwd = str(Path.cwd()) + os.path.sep
files_with_nits = {
- warning.removeprefix(cwd).split(":")[0]
+ warning.removeprefix(cwd).split(':')[0]
for warning in warnings
- if "Doc/" in warning
+ if 'Doc/' in warning
}
- with Path("Doc/tools/.nitignore").open(encoding="UTF-8") as clean_files:
+ with Path('Doc/tools/.nitignore').open(encoding='UTF-8') as clean_files:
files_with_expected_nits = {
filename.strip()
for filename in clean_files
- if filename.strip() and not filename.startswith("#")
+ if filename.strip() and not filename.startswith('#')
}
if args.annotate_diff is not None:
@@ -347,5 +330,5 @@ def main(argv: list[str] | None = None) -> int:
return exit_code
-if __name__ == "__main__":
+if __name__ == '__main__':
sys.exit(main())
diff --git a/Doc/tools/extensions/glossary_search.py b/Doc/tools/extensions/glossary_search.py
index 2448529125cb1f..c874aa77421444 100644
--- a/Doc/tools/extensions/glossary_search.py
+++ b/Doc/tools/extensions/glossary_search.py
@@ -32,17 +32,14 @@ def process_glossary_nodes(app: Sphinx, doctree: nodes.document, _docname: str)
definition = glossary_item[-1]
rendered = app.builder.render_partial(definition)
- terms[term.lower()] = {
- 'title': term,
- 'body': rendered['html_body']
- }
+ terms[term.lower()] = {'title': term, 'body': rendered['html_body']}
def write_glossary_json(app: Sphinx, _exc: Exception) -> None:
if not getattr(app.env, 'glossary_terms', None):
return
- logger.info(f'Writing glossary.json', color='green')
+ logger.info('Writing glossary.json', color='green')
dest = Path(app.outdir, '_static', 'glossary.json')
dest.parent.mkdir(exist_ok=True)
dest.write_text(json.dumps(app.env.glossary_terms), encoding='utf-8')
diff --git a/Doc/tools/extensions/lexers/__init__.py b/Doc/tools/extensions/lexers/__init__.py
index e12ac5be8139cc..6b4746a762ced3 100644
--- a/Doc/tools/extensions/lexers/__init__.py
+++ b/Doc/tools/extensions/lexers/__init__.py
@@ -4,12 +4,12 @@
def setup(app):
# Used for highlighting Parser/Python.asdl in library/ast.rst
- app.add_lexer("asdl", ASDLLexer)
+ app.add_lexer('asdl', ASDLLexer)
# Used for highlighting Grammar/python.gram in reference/grammar.rst
- app.add_lexer("peg", PEGLexer)
+ app.add_lexer('peg', PEGLexer)
return {
- "version": "1.0",
- "parallel_read_safe": True,
- "parallel_write_safe": True,
+ 'version': '1.0',
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
}
diff --git a/Doc/tools/extensions/lexers/asdl_lexer.py b/Doc/tools/extensions/lexers/asdl_lexer.py
index 2cea058566ad85..9e937ac8c2e4df 100644
--- a/Doc/tools/extensions/lexers/asdl_lexer.py
+++ b/Doc/tools/extensions/lexers/asdl_lexer.py
@@ -3,40 +3,40 @@
class ASDLLexer(RegexLexer):
- name = "ASDL"
- aliases = ["asdl"]
- filenames = ["*.asdl"]
- _name = r"([^\W\d]\w*)"
- _text_ws = r"(\s*)"
+ name = 'ASDL'
+ aliases = ['asdl']
+ filenames = ['*.asdl']
+ _name = r'([^\W\d]\w*)'
+ _text_ws = r'(\s*)'
tokens = {
- "ws": [
- (r"\n", Text),
- (r"\s+", Text),
- (r"--.*?$", Comment.Singleline),
+ 'ws': [
+ (r'\n', Text),
+ (r'\s+', Text),
+ (r'--.*?$', Comment.Singleline),
],
- "root": [
- include("ws"),
+ 'root': [
+ include('ws'),
(
- r"(module)" + _text_ws + _name,
+ r'(module)' + _text_ws + _name,
bygroups(Keyword, Text, Name.Tag),
),
(
- r"(\w+)(\*\s|\?\s|\s)(\w+)",
+ r'(\w+)(\*\s|\?\s|\s)(\w+)',
bygroups(Name.Builtin.Pseudo, Operator, Name),
),
# Keep in line with ``builtin_types`` from Parser/asdl.py.
# ASDL's 4 builtin types are
# constant, identifier, int, string
('constant|identifier|int|string', Name.Builtin),
- (r"attributes", Name.Builtin),
+ (r'attributes', Name.Builtin),
(
- _name + _text_ws + "(=)",
+ _name + _text_ws + '(=)',
bygroups(Name, Text, Operator),
),
(_name, Name.Class),
- (r"\|", Operator),
- (r"{|}|\(|\)", Punctuation),
- (r".", Text),
+ (r'\|', Operator),
+ (r'{|}|\(|\)', Punctuation),
+ (r'.', Text),
],
}
diff --git a/Doc/tools/extensions/lexers/peg_lexer.py b/Doc/tools/extensions/lexers/peg_lexer.py
index 827af205583f61..5c4bf1bd7385a4 100644
--- a/Doc/tools/extensions/lexers/peg_lexer.py
+++ b/Doc/tools/extensions/lexers/peg_lexer.py
@@ -16,64 +16,64 @@ class PEGLexer(RegexLexer):
- Rules named `invalid_*` or `incorrect_*`
"""
- name = "PEG"
- aliases = ["peg"]
- filenames = ["*.gram"]
- _name = r"([^\W\d]\w*)"
- _text_ws = r"(\s*)"
+ name = 'PEG'
+ aliases = ['peg']
+ filenames = ['*.gram']
+ _name = r'([^\W\d]\w*)'
+ _text_ws = r'(\s*)'
tokens = {
- "ws": [(r"\n", Text), (r"\s+", Text), (r"#.*$", Comment.Singleline),],
- "lookaheads": [
+ 'ws': [(r'\n', Text), (r'\s+', Text), (r'#.*$', Comment.Singleline),],
+ 'lookaheads': [
# Forced tokens
- (r"(&&)(?=\w+\s?)", bygroups(None)),
+ (r'(&&)(?=\w+\s?)', bygroups(None)),
(r"(&&)(?='.+'\s?)", bygroups(None)),
(r'(&&)(?=".+"\s?)', bygroups(None)),
- (r"(&&)(?=\(.+\)\s?)", bygroups(None)),
+ (r'(&&)(?=\(.+\)\s?)', bygroups(None)),
- (r"(?<=\|\s)(&\w+\s?)", bygroups(None)),
+ (r'(?<=\|\s)(&\w+\s?)', bygroups(None)),
(r"(?<=\|\s)(&'.+'\s?)", bygroups(None)),
(r'(?<=\|\s)(&".+"\s?)', bygroups(None)),
- (r"(?<=\|\s)(&\(.+\)\s?)", bygroups(None)),
+ (r'(?<=\|\s)(&\(.+\)\s?)', bygroups(None)),
],
- "metas": [
+ 'metas': [
(r"(@\w+ '''(.|\n)+?''')", bygroups(None)),
- (r"^(@.*)$", bygroups(None)),
+ (r'^(@.*)$', bygroups(None)),
],
- "actions": [
- (r"{(.|\n)+?}", bygroups(None)),
+ 'actions': [
+ (r'{(.|\n)+?}', bygroups(None)),
],
- "strings": [
+ 'strings': [
(r"'\w+?'", Keyword),
(r'"\w+?"', Keyword),
(r"'\W+?'", Text),
(r'"\W+?"', Text),
],
- "variables": [
- (_name + _text_ws + "(=)", bygroups(None, None, None),),
- (_name + _text_ws + r"(\[[\w\d_\*]+?\])" + _text_ws + "(=)", bygroups(None, None, None, None, None),),
+ 'variables': [
+ (_name + _text_ws + '(=)', bygroups(None, None, None),),
+ (_name + _text_ws + r'(\[[\w\d_\*]+?\])' + _text_ws + '(=)', bygroups(None, None, None, None, None),),
],
- "invalids": [
- (r"^(\s+\|\s+.*invalid_\w+.*\n)", bygroups(None)),
- (r"^(\s+\|\s+.*incorrect_\w+.*\n)", bygroups(None)),
- (r"^(#.*invalid syntax.*(?:.|\n)*)", bygroups(None),),
+ 'invalids': [
+ (r'^(\s+\|\s+.*invalid_\w+.*\n)', bygroups(None)),
+ (r'^(\s+\|\s+.*incorrect_\w+.*\n)', bygroups(None)),
+ (r'^(#.*invalid syntax.*(?:.|\n)*)', bygroups(None),),
],
- "root": [
- include("invalids"),
- include("ws"),
- include("lookaheads"),
- include("metas"),
- include("actions"),
- include("strings"),
- include("variables"),
- (r"\b(?!(NULL|EXTRA))([A-Z_]+)\b\s*(?!\()", Text,),
+ 'root': [
+ include('invalids'),
+ include('ws'),
+ include('lookaheads'),
+ include('metas'),
+ include('actions'),
+ include('strings'),
+ include('variables'),
+ (r'\b(?!(NULL|EXTRA))([A-Z_]+)\b\s*(?!\()', Text,),
(
- r"^\s*" + _name + r"\s*" + r"(\[.*\])?" + r"\s*" + r"(\(.+\))?" + r"\s*(:)",
+ r'^\s*' + _name + r'\s*' + r'(\[.*\])?' + r'\s*' + r'(\(.+\))?' + r'\s*(:)',
bygroups(Name.Function, None, None, Punctuation),
),
(_name, Name.Function),
- (r"[\||\.|\+|\*|\?]", Operator),
- (r"{|}|\(|\)|\[|\]", Punctuation),
- (r".", Text),
+ (r'[\||\.|\+|\*|\?]', Operator),
+ (r'{|}|\(|\)|\[|\]', Punctuation),
+ (r'.', Text),
],
}
From a882565564738658b28bc5af1d3737b0fe4411ea Mon Sep 17 00:00:00 2001
From: Adam Turner <9087854+aa-turner@users.noreply.github.com>
Date: Fri, 19 Jul 2024 11:22:40 +0100
Subject: [PATCH 02/12] Add ruff-format pre-commit
---
.pre-commit-config.yaml | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index a444c56c133827..44d353d25c7190 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,19 +1,23 @@
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.3.4
+ rev: v0.5.3
hooks:
- id: ruff
- name: Run Ruff on Doc/
+ name: Run Ruff (check) on Doc/
args: [--exit-non-zero-on-fix]
files: ^Doc/
- id: ruff
- name: Run Ruff on Lib/test/
+ name: Run Ruff (check) on Lib/test/
args: [--exit-non-zero-on-fix]
files: ^Lib/test/
- id: ruff
- name: Run Ruff on Argument Clinic
+ name: Run Ruff (check) on Argument Clinic
args: [--exit-non-zero-on-fix, --config=Tools/clinic/.ruff.toml]
files: ^Tools/clinic/|Lib/test/test_clinic.py
+ - id: ruff-format
+ name: Run Ruff (format) on Doc/
+ args: [--exit-non-zero-on-fix]
+ files: ^Doc/
- repo: https://github.com/psf/black-pre-commit-mirror
rev: 24.4.2
From 13cb3f39c776026e8d60ab9a304143c5a43058f7 Mon Sep 17 00:00:00 2001
From: Adam Turner <9087854+aa-turner@users.noreply.github.com>
Date: Fri, 19 Jul 2024 11:25:14 +0100
Subject: [PATCH 03/12] Disable E501
---
Doc/.ruff.toml | 12 ++----------
Doc/tools/check-warnings.py | 2 +-
2 files changed, 3 insertions(+), 11 deletions(-)
diff --git a/Doc/.ruff.toml b/Doc/.ruff.toml
index 2af7e6bd9d6050..05221729d41815 100644
--- a/Doc/.ruff.toml
+++ b/Doc/.ruff.toml
@@ -31,16 +31,8 @@ select = [
"UP", # pyupgrade
"W", # pycodestyle
]
-
-[lint.per-file-ignores]
-"tools/extensions/lexers/peg_lexer.py" =[
- "E501",
-]
-"tools/extensions/c_annotations.py" =[
- "E501",
-]
-"tools/extensions/pyspecific.py" =[
- "E501",
+ignore = [
+ "E501", # We use auto-formatting, so ignore line length errors
]
[format]
diff --git a/Doc/tools/check-warnings.py b/Doc/tools/check-warnings.py
index b098d06a00b1c5..b974673d97a941 100644
--- a/Doc/tools/check-warnings.py
+++ b/Doc/tools/check-warnings.py
@@ -39,7 +39,7 @@
# Regex pattern to match the line numbers in a Git unified diff
DIFF_PATTERN = re.compile(
- r'^@@ -(?P<linea>\d+)(?:,(?P<removed>\d+))? \+(?P<lineb>\d+)(?:,(?P<added>\d+))? @@', # NoQA: E501
+ r'^@@ -(?P<linea>\d+)(?:,(?P<removed>\d+))? \+(?P<lineb>\d+)(?:,(?P<added>\d+))? @@',
flags=re.MULTILINE,
)
From 933095c705a91b0dc1b864c1c4294766b96ad587 Mon Sep 17 00:00:00 2001
From: Adam Turner <9087854+aa-turner@users.noreply.github.com>
Date: Fri, 19 Jul 2024 11:27:39 +0100
Subject: [PATCH 04/12] Try pre-commit with '--check'
---
.pre-commit-config.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 44d353d25c7190..d611a303510227 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -16,7 +16,7 @@ repos:
files: ^Tools/clinic/|Lib/test/test_clinic.py
- id: ruff-format
name: Run Ruff (format) on Doc/
- args: [--exit-non-zero-on-fix]
+ args: [--check]
files: ^Doc/
- repo: https://github.com/psf/black-pre-commit-mirror
From a9228fe43238f0985f51d4bcaaa9283430c3a00a Mon Sep 17 00:00:00 2001
From: Adam Turner <9087854+aa-turner@users.noreply.github.com>
Date: Fri, 19 Jul 2024 11:29:38 +0100
Subject: [PATCH 05/12] Remove flake8-errmsg
---
Doc/.ruff.toml | 1 -
1 file changed, 1 deletion(-)
diff --git a/Doc/.ruff.toml b/Doc/.ruff.toml
index 05221729d41815..511643f54961ef 100644
--- a/Doc/.ruff.toml
+++ b/Doc/.ruff.toml
@@ -15,7 +15,6 @@ select = [
"C4", # flake8-comprehensions
"B", # flake8-bugbear
"E", # pycodestyle
- "EM", # flake8-errmsg
"F", # pyflakes
"FA", # flake8-future-annotations
"FLY", # flynt
From efbee68cee73e8e40f683c588d848d3ac3487647 Mon Sep 17 00:00:00 2001
From: Adam Turner <9087854+aa-turner@users.noreply.github.com>
Date: Fri, 19 Jul 2024 11:32:03 +0100
Subject: [PATCH 06/12] Revert the version bump (new Lib/test failures)
---
.pre-commit-config.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index d611a303510227..23410f24fac6c4 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,6 +1,6 @@
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.5.3
+ rev: v0.3.4
hooks:
- id: ruff
name: Run Ruff (check) on Doc/
From 7bd87a38d233b6d8cedacbb76813f8a0a5c4cbf0 Mon Sep 17 00:00:00 2001
From: Adam Turner <9087854+aa-turner@users.noreply.github.com>
Date: Fri, 19 Jul 2024 11:38:37 +0100
Subject: [PATCH 07/12] Update comments in .ruff.toml
---
Doc/.ruff.toml | 38 +++++++++++++++++++-------------------
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/Doc/.ruff.toml b/Doc/.ruff.toml
index 511643f54961ef..06eb59f05cfbb7 100644
--- a/Doc/.ruff.toml
+++ b/Doc/.ruff.toml
@@ -1,4 +1,4 @@
-target-version = "py312" # align with the version in oldest_supported_sphinx
+target-version = "py312" # Align with the version in oldest_supported_sphinx
fix = true
output-format = "full"
extend-exclude = [
@@ -12,26 +12,26 @@ extend-exclude = [
[lint]
preview = true
select = [
- "C4", # flake8-comprehensions
- "B", # flake8-bugbear
- "E", # pycodestyle
- "F", # pyflakes
- "FA", # flake8-future-annotations
- "FLY", # flynt
- "FURB",# refurb
- "G", # flake8-logging-format
- "I", # isort
- "LOG", # flake8-logging
- "N", # pep8-naming
- "PERF",# perflint
- "PGH", # pygrep-hooks
- "PT", # flake8-pytest-style
- "TCH", # flake8-type-checking
- "UP", # pyupgrade
- "W", # pycodestyle
+ "C4", # flake8-comprehensions
+ "B", # flake8-bugbear
+ "E", # pycodestyle
+ "F", # pyflakes
+ "FA", # flake8-future-annotations
+ "FLY", # flynt
+ "FURB", # refurb
+ "G", # flake8-logging-format
+ "I", # isort
+ "LOG", # flake8-logging
+ "N", # pep8-naming
+ "PERF", # perflint
+ "PGH", # pygrep-hooks
+ "PT", # flake8-pytest-style
+ "TCH", # flake8-type-checking
+ "UP", # pyupgrade
+ "W", # pycodestyle
]
ignore = [
- "E501", # We use auto-formatting, so ignore line length errors
+ "E501", # Ignore line length errors (we use auto-formatting)
]
[format]
From 65ceaa59def4ad3fec356a0a100b004f62c26bf4 Mon Sep 17 00:00:00 2001
From: Adam Turner <9087854+aa-turner@users.noreply.github.com>
Date: Fri, 19 Jul 2024 11:45:56 +0100
Subject: [PATCH 08/12] Resolve Alex's notes
Co-authored-by: Alex Waygood
---
.pre-commit-config.yaml | 6 +++---
Doc/.ruff.toml | 6 ++----
2 files changed, 5 insertions(+), 7 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 23410f24fac6c4..b10be5b6bd9904 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -3,15 +3,15 @@ repos:
rev: v0.3.4
hooks:
- id: ruff
- name: Run Ruff (check) on Doc/
+ name: Run Ruff (lint) on Doc/
args: [--exit-non-zero-on-fix]
files: ^Doc/
- id: ruff
- name: Run Ruff (check) on Lib/test/
+ name: Run Ruff (lint) on Lib/test/
args: [--exit-non-zero-on-fix]
files: ^Lib/test/
- id: ruff
- name: Run Ruff (check) on Argument Clinic
+ name: Run Ruff (lint) on Argument Clinic
args: [--exit-non-zero-on-fix, --config=Tools/clinic/.ruff.toml]
files: ^Tools/clinic/|Lib/test/test_clinic.py
- id: ruff-format
diff --git a/Doc/.ruff.toml b/Doc/.ruff.toml
index 06eb59f05cfbb7..c3c8fa5b59db3d 100644
--- a/Doc/.ruff.toml
+++ b/Doc/.ruff.toml
@@ -3,6 +3,7 @@ fix = true
output-format = "full"
extend-exclude = [
"includes/*",
+ # Temporary exclusions:
"tools/extensions/c_annotations.py",
"tools/extensions/escape4chm.py",
"tools/extensions/patchlevel.py",
@@ -37,10 +38,7 @@ ignore = [
[format]
preview = true
quote-style = "single"
+docstring-code-format = true
exclude = [
"tools/extensions/lexers/*",
- "tools/extensions/c_annotations.py",
- "tools/extensions/escape4chm.py",
- "tools/extensions/patchlevel.py",
- "tools/extensions/pyspecific.py",
]
From 9e95b255310e983314a4d7b8f57754fefac24f98 Mon Sep 17 00:00:00 2001
From: Adam Turner <9087854+aa-turner@users.noreply.github.com>
Date: Fri, 19 Jul 2024 12:01:37 +0100
Subject: [PATCH 09/12] Resolve Hugo's notes
---
Doc/.ruff.toml | 1 +
Doc/conf.py | 6 +++---
2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/Doc/.ruff.toml b/Doc/.ruff.toml
index c3c8fa5b59db3d..e74a954f403e3a 100644
--- a/Doc/.ruff.toml
+++ b/Doc/.ruff.toml
@@ -1,6 +1,7 @@
target-version = "py312" # Align with the version in oldest_supported_sphinx
fix = true
output-format = "full"
+line-length = 79
extend-exclude = [
"includes/*",
# Temporary exclusions:
diff --git a/Doc/conf.py b/Doc/conf.py
index 7c6201a123d5b5..3250821466ba31 100644
--- a/Doc/conf.py
+++ b/Doc/conf.py
@@ -31,13 +31,13 @@
# Skip if downstream redistributors haven't installed them
try:
- import notfound.extension # NoQA: F401
+ import notfound.extension # noqa: F401
except ImportError:
pass
else:
extensions.append('notfound.extension')
try:
- import sphinxext.opengraph # NoQA: F401
+ import sphinxext.opengraph # noqa: F401
except ImportError:
pass
else:
@@ -64,7 +64,7 @@
# We look for the Include/patchlevel.h file in the current Python source tree
# and replace the values accordingly.
-import patchlevel # NoQA: E402
+import patchlevel # noqa: E402
version, release = patchlevel.get_version_info()
From dc85abebec83ce42d09dcd1faa13ac77aa3a9114 Mon Sep 17 00:00:00 2001
From: Adam Turner <9087854+aa-turner@users.noreply.github.com>
Date: Fri, 19 Jul 2024 12:04:24 +0100
Subject: [PATCH 10/12] Reformat with new settings
---
Doc/conf.py | 36 ++++++++++++++++++++-----
Doc/tools/check-warnings.py | 35 +++++++++++++++++-------
Doc/tools/extensions/glossary_search.py | 11 ++++++--
3 files changed, 64 insertions(+), 18 deletions(-)
diff --git a/Doc/conf.py b/Doc/conf.py
index 3250821466ba31..0d61d7817995bc 100644
--- a/Doc/conf.py
+++ b/Doc/conf.py
@@ -342,17 +342,21 @@
# Deployment preview information
# (See .readthedocs.yml and https://docs.readthedocs.io/en/stable/reference/environment-variables.html)
-repository_url = os.getenv('READTHEDOCS_GIT_CLONE_URL')
+is_deployment_preview = os.getenv('READTHEDOCS_VERSION_TYPE') == 'external'
+repository_url = os.getenv('READTHEDOCS_GIT_CLONE_URL', '')
+repository_url = repository_url.removesuffix('.git')
html_context = {
- 'is_deployment_preview': os.getenv('READTHEDOCS_VERSION_TYPE') == 'external',
- 'repository_url': repository_url.removesuffix('.git') if repository_url else None,
+ 'is_deployment_preview': is_deployment_preview,
+ 'repository_url': repository_url if repository_url else None,
'pr_id': os.getenv('READTHEDOCS_VERSION'),
'enable_analytics': os.getenv('PYTHON_DOCS_ENABLE_ANALYTICS'),
}
# This 'Last updated on:' timestamp is inserted at the bottom of every page.
html_time = int(os.environ.get('SOURCE_DATE_EPOCH', time.time()))
-html_last_updated_fmt = time.strftime('%b %d, %Y (%H:%M UTC)', time.gmtime(html_time))
+html_last_updated_fmt = time.strftime(
+ '%b %d, %Y (%H:%M UTC)', time.gmtime(html_time)
+)
# Path to find HTML templates.
templates_path = ['tools/templates']
@@ -441,9 +445,27 @@
_stdauthor,
'manual',
),
- ('tutorial/index', 'tutorial.tex', 'Python Tutorial', _stdauthor, 'manual'),
- ('using/index', 'using.tex', 'Python Setup and Usage', _stdauthor, 'manual'),
- ('faq/index', 'faq.tex', 'Python Frequently Asked Questions', _stdauthor, 'manual'),
+ (
+ 'tutorial/index',
+ 'tutorial.tex',
+ 'Python Tutorial',
+ _stdauthor,
+ 'manual',
+ ),
+ (
+ 'using/index',
+ 'using.tex',
+ 'Python Setup and Usage',
+ _stdauthor,
+ 'manual',
+ ),
+ (
+ 'faq/index',
+ 'faq.tex',
+ 'Python Frequently Asked Questions',
+ _stdauthor,
+ 'manual',
+ ),
(
'whatsnew/' + version,
'whatsnew.tex',
diff --git a/Doc/tools/check-warnings.py b/Doc/tools/check-warnings.py
index b974673d97a941..a6b164d7b7a621 100644
--- a/Doc/tools/check-warnings.py
+++ b/Doc/tools/check-warnings.py
@@ -85,12 +85,16 @@ def get_diff_lines(ref_a: str, ref_b: str, file: Path) -> list[int]:
# Scrape line offsets + lengths from diff and convert to line numbers
line_matches = DIFF_PATTERN.finditer(diff_output.stdout)
# Removed and added line counts are 1 if not printed
- line_match_values = [line_match.groupdict(default=1) for line_match in line_matches]
+ line_match_values = [
+ line_match.groupdict(default=1) for line_match in line_matches
+ ]
line_ints = [
(int(match_value['lineb']), int(match_value['added']))
for match_value in line_match_values
]
- line_ranges = [range(line_b, line_b + added) for line_b, added in line_ints]
+ line_ranges = [
+ range(line_b, line_b + added) for line_b, added in line_ints
+ ]
line_numbers = list(itertools.chain(*line_ranges))
return line_numbers
@@ -115,10 +119,13 @@ def filter_and_parse_warnings(
) -> list[re.Match[str]]:
"""Get the warnings matching passed files and parse them with regex."""
filtered_warnings = [
- warning for warning in warnings if any(str(file) in warning for file in files)
+ warning
+ for warning in warnings
+ if any(str(file) in warning for file in files)
]
warning_matches = [
- WARNING_PATTERN.fullmatch(warning.strip()) for warning in filtered_warnings
+ WARNING_PATTERN.fullmatch(warning.strip())
+ for warning in filtered_warnings
]
non_null_matches = [warning for warning in warning_matches if warning]
return non_null_matches
@@ -132,10 +139,14 @@ def filter_warnings_by_diff(
with file.open(encoding='UTF-8') as file_obj:
paragraphs = get_para_line_numbers(file_obj)
touched_paras = [
- para_lines for para_lines in paragraphs if set(diff_lines) & set(para_lines)
+ para_lines
+ for para_lines in paragraphs
+ if set(diff_lines) & set(para_lines)
]
touched_para_lines = set(itertools.chain(*touched_paras))
- warnings_infile = [warning for warning in warnings if str(file) in warning['file']]
+ warnings_infile = [
+ warning for warning in warnings if str(file) in warning['file']
+ ]
warnings_touched = [
warning
for warning in warnings_infile
@@ -196,7 +207,9 @@ def annotate_diff(
def fail_if_regression(
- warnings: list[str], files_with_expected_nits: set[str], files_with_nits: set[str]
+ warnings: list[str],
+ files_with_expected_nits: set[str],
+ files_with_nits: set[str],
) -> int:
"""
Ensure some files always pass Sphinx nit-picky mode (no missing references).
@@ -245,7 +258,9 @@ def fail_if_new_news_nit(warnings: list[str], threshold: int) -> int:
news_nits = (warning for warning in warnings if '/build/NEWS:' in warning)
# Nits found before the threshold line
- new_news_nits = [nit for nit in news_nits if int(nit.split(':')[1]) <= threshold]
+ new_news_nits = [
+ nit for nit in news_nits if int(nit.split(':')[1]) <= threshold
+ ]
if new_news_nits:
print('\nError: new NEWS nits:\n')
@@ -322,7 +337,9 @@ def main(argv: list[str] | None = None) -> int:
)
if args.fail_if_improved:
- exit_code += fail_if_improved(files_with_expected_nits, files_with_nits)
+ exit_code += fail_if_improved(
+ files_with_expected_nits, files_with_nits
+ )
if args.fail_if_new_news_nit:
exit_code += fail_if_new_news_nit(warnings, args.fail_if_new_news_nit)
diff --git a/Doc/tools/extensions/glossary_search.py b/Doc/tools/extensions/glossary_search.py
index c874aa77421444..502b6cd95bcb94 100644
--- a/Doc/tools/extensions/glossary_search.py
+++ b/Doc/tools/extensions/glossary_search.py
@@ -17,7 +17,11 @@
logger = logging.getLogger(__name__)
-def process_glossary_nodes(app: Sphinx, doctree: nodes.document, _docname: str) -> None:
+def process_glossary_nodes(
+ app: Sphinx,
+ doctree: nodes.document,
+ _docname: str,
+) -> None:
if app.builder.format != 'html' or app.builder.embedded:
return
@@ -32,7 +36,10 @@ def process_glossary_nodes(app: Sphinx, doctree: nodes.document, _docname: str)
definition = glossary_item[-1]
rendered = app.builder.render_partial(definition)
- terms[term.lower()] = {'title': term, 'body': rendered['html_body']}
+ terms[term.lower()] = {
+ 'title': term,
+ 'body': rendered['html_body'],
+ }
def write_glossary_json(app: Sphinx, _exc: Exception) -> None:
From a599ef7a3b3c028bd720089c4a223fc77481ab90 Mon Sep 17 00:00:00 2001
From: Adam Turner <9087854+aa-turner@users.noreply.github.com>
Date: Fri, 19 Jul 2024 12:08:28 +0100
Subject: [PATCH 11/12] Use quote-style=preserve
---
Doc/.ruff.toml | 2 +-
Doc/conf.py | 4 +-
Doc/tools/check-warnings.py | 132 +++++++++++-----------
Doc/tools/extensions/lexers/__init__.py | 10 +-
Doc/tools/extensions/lexers/asdl_lexer.py | 38 +++----
Doc/tools/extensions/lexers/peg_lexer.py | 72 ++++++------
6 files changed, 129 insertions(+), 129 deletions(-)
diff --git a/Doc/.ruff.toml b/Doc/.ruff.toml
index e74a954f403e3a..b617208f78ef6f 100644
--- a/Doc/.ruff.toml
+++ b/Doc/.ruff.toml
@@ -38,7 +38,7 @@ ignore = [
[format]
preview = true
-quote-style = "single"
+quote-style = "preserve"
docstring-code-format = true
exclude = [
"tools/extensions/lexers/*",
diff --git a/Doc/conf.py b/Doc/conf.py
index 0d61d7817995bc..ce8378bb21aed4 100644
--- a/Doc/conf.py
+++ b/Doc/conf.py
@@ -347,7 +347,7 @@
repository_url = repository_url.removesuffix('.git')
html_context = {
'is_deployment_preview': is_deployment_preview,
- 'repository_url': repository_url if repository_url else None,
+ 'repository_url': repository_url or None,
'pr_id': os.getenv('READTHEDOCS_VERSION'),
'enable_analytics': os.getenv('PYTHON_DOCS_ENABLE_ANALYTICS'),
}
@@ -524,7 +524,7 @@
# The coverage checker will ignore all C items whose names match these regexes
# (using re.match) -- the keys must be the same as in coverage_c_regexes.
coverage_ignore_c_items = {
- # 'cfunction': [...]
+ # 'cfunction': [...]
}
diff --git a/Doc/tools/check-warnings.py b/Doc/tools/check-warnings.py
index a6b164d7b7a621..c686eecf8d9271 100644
--- a/Doc/tools/check-warnings.py
+++ b/Doc/tools/check-warnings.py
@@ -20,48 +20,48 @@
# Exclude these whether they're dirty or clean,
# because they trigger a rebuild of dirty files.
EXCLUDE_FILES = {
- 'Doc/whatsnew/changelog.rst',
+ "Doc/whatsnew/changelog.rst",
}
# Subdirectories of Doc/ to exclude.
EXCLUDE_SUBDIRS = {
- '.env',
- '.venv',
- 'env',
- 'includes',
- 'venv',
+ ".env",
+ ".venv",
+ "env",
+ "includes",
+ "venv",
}
# Regex pattern to match the parts of a Sphinx warning
WARNING_PATTERN = re.compile(
- r'(?P([A-Za-z]:[\\/])?[^:]+):(?P\d+): WARNING: (?P.+)'
+ r"(?P([A-Za-z]:[\\/])?[^:]+):(?P\d+): WARNING: (?P.+)"
)
# Regex pattern to match the line numbers in a Git unified diff
DIFF_PATTERN = re.compile(
- r'^@@ -(?P\d+)(?:,(?P\d+))? \+(?P\d+)(?:,(?P\d+))? @@',
+ r"^@@ -(?P\d+)(?:,(?P\d+))? \+(?P\d+)(?:,(?P\d+))? @@",
flags=re.MULTILINE,
)
-def get_diff_files(ref_a: str, ref_b: str, filter_mode: str = '') -> set[Path]:
+def get_diff_files(ref_a: str, ref_b: str, filter_mode: str = "") -> set[Path]:
"""List the files changed between two Git refs, filtered by change type."""
added_files_result = subprocess.run(
[
- 'git',
- 'diff',
- f'--diff-filter={filter_mode}',
- '--name-only',
- f'{ref_a}...{ref_b}',
- '--',
+ "git",
+ "diff",
+ f"--diff-filter={filter_mode}",
+ "--name-only",
+ f"{ref_a}...{ref_b}",
+ "--",
],
stdout=subprocess.PIPE,
check=True,
text=True,
- encoding='UTF-8',
+ encoding="UTF-8",
)
- added_files = added_files_result.stdout.strip().split('\n')
+ added_files = added_files_result.stdout.strip().split("\n")
return {Path(file.strip()) for file in added_files if file.strip()}
@@ -69,17 +69,17 @@ def get_diff_lines(ref_a: str, ref_b: str, file: Path) -> list[int]:
"""List the lines changed between two Git refs for a specific file."""
diff_output = subprocess.run(
[
- 'git',
- 'diff',
- '--unified=0',
- f'{ref_a}...{ref_b}',
- '--',
+ "git",
+ "diff",
+ "--unified=0",
+ f"{ref_a}...{ref_b}",
+ "--",
str(file),
],
stdout=subprocess.PIPE,
check=True,
text=True,
- encoding='UTF-8',
+ encoding="UTF-8",
)
# Scrape line offsets + lengths from diff and convert to line numbers
@@ -89,7 +89,7 @@ def get_diff_lines(ref_a: str, ref_b: str, file: Path) -> list[int]:
line_match.groupdict(default=1) for line_match in line_matches
]
line_ints = [
- (int(match_value['lineb']), int(match_value['added']))
+ (int(match_value["lineb"]), int(match_value["added"]))
for match_value in line_match_values
]
line_ranges = [
@@ -136,7 +136,7 @@ def filter_warnings_by_diff(
) -> list[re.Match[str]]:
"""Filter the passed per-file warnings to just those on changed lines."""
diff_lines = get_diff_lines(ref_a, ref_b, file)
- with file.open(encoding='UTF-8') as file_obj:
+ with file.open(encoding="UTF-8") as file_obj:
paragraphs = get_para_line_numbers(file_obj)
touched_paras = [
para_lines
@@ -145,12 +145,12 @@ def filter_warnings_by_diff(
]
touched_para_lines = set(itertools.chain(*touched_paras))
warnings_infile = [
- warning for warning in warnings if str(file) in warning['file']
+ warning for warning in warnings if str(file) in warning["file"]
]
warnings_touched = [
warning
for warning in warnings_infile
- if int(warning['line']) in touched_para_lines
+ if int(warning["line"]) in touched_para_lines
]
return warnings_touched
@@ -160,7 +160,7 @@ def process_touched_warnings(
) -> list[re.Match[str]]:
"""Filter a list of Sphinx warnings to those affecting touched lines."""
added_files, modified_files = tuple(
- get_diff_files(ref_a, ref_b, filter_mode=mode) for mode in ('A', 'M')
+ get_diff_files(ref_a, ref_b, filter_mode=mode) for mode in ("A", "M")
)
warnings_added = filter_and_parse_warnings(warnings, added_files)
@@ -169,7 +169,7 @@ def process_touched_warnings(
modified_files_warned = {
file
for file in modified_files
- if any(str(file) in warning['file'] for warning in warnings_modified)
+ if any(str(file) in warning["file"] for warning in warnings_modified)
}
warnings_modified_touched = [
@@ -184,7 +184,7 @@ def process_touched_warnings(
def annotate_diff(
- warnings: list[str], ref_a: str = 'main', ref_b: str = 'HEAD'
+ warnings: list[str], ref_a: str = "main", ref_b: str = "HEAD"
) -> None:
"""
Convert Sphinx warning messages to GitHub Actions for changed paragraphs.
@@ -198,12 +198,12 @@ def annotate_diff(
https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-a-warning-message
"""
warnings_touched = process_touched_warnings(warnings, ref_a, ref_b)
- print('Emitting doc warnings matching modified lines:')
+ print("Emitting doc warnings matching modified lines:")
for warning in warnings_touched:
- print('::warning file={file},line={line}::{msg}'.format_map(warning))
+ print("::warning file={file},line={line}::{msg}".format_map(warning))
print(warning[0])
if not warnings_touched:
- print('None')
+ print("None")
def fail_if_regression(
@@ -217,19 +217,19 @@ def fail_if_regression(
"""
all_rst = {
str(rst)
- for rst in Path('Doc/').rglob('*.rst')
+ for rst in Path("Doc/").rglob("*.rst")
if rst.parts[1] not in EXCLUDE_SUBDIRS
}
should_be_clean = all_rst - files_with_expected_nits - EXCLUDE_FILES
problem_files = sorted(should_be_clean & files_with_nits)
if problem_files:
- print('\nError: must not contain warnings:\n')
+ print("\nError: must not contain warnings:\n")
for filename in problem_files:
print(filename)
for warning in warnings:
if filename in warning:
if match := WARNING_PATTERN.fullmatch(warning):
- print(' {line}: {msg}'.format_map(match))
+ print(" {line}: {msg}".format_map(match))
return -1
return 0
@@ -243,10 +243,10 @@ def fail_if_improved(
"""
files_with_no_nits = files_with_expected_nits - files_with_nits
if files_with_no_nits:
- print('\nCongratulations! You improved:\n')
+ print("\nCongratulations! You improved:\n")
for filename in sorted(files_with_no_nits):
print(filename)
- print('\nPlease remove from Doc/tools/.nitignore\n')
+ print("\nPlease remove from Doc/tools/.nitignore\n")
return -1
return 0
@@ -255,15 +255,15 @@ def fail_if_new_news_nit(warnings: list[str], threshold: int) -> int:
"""
Ensure no warnings are found in the NEWS file before a given line number.
"""
- news_nits = (warning for warning in warnings if '/build/NEWS:' in warning)
+ news_nits = (warning for warning in warnings if "/build/NEWS:" in warning)
# Nits found before the threshold line
new_news_nits = [
- nit for nit in news_nits if int(nit.split(':')[1]) <= threshold
+ nit for nit in news_nits if int(nit.split(":")[1]) <= threshold
]
if new_news_nits:
- print('\nError: new NEWS nits:\n')
+ print("\nError: new NEWS nits:\n")
for warning in new_news_nits:
print(warning)
return -1
@@ -274,58 +274,58 @@ def fail_if_new_news_nit(warnings: list[str], threshold: int) -> int:
def main(argv: list[str] | None = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument(
- '--annotate-diff',
- nargs='*',
- metavar=('BASE_REF', 'HEAD_REF'),
- help='Add GitHub Actions annotations on the diff for warnings on '
- 'lines changed between the given refs (main and HEAD, by default)',
+ "--annotate-diff",
+ nargs="*",
+ metavar=("BASE_REF", "HEAD_REF"),
+ help="Add GitHub Actions annotations on the diff for warnings on "
+ "lines changed between the given refs (main and HEAD, by default)",
)
parser.add_argument(
- '--fail-if-regression',
- action='store_true',
- help='Fail if known-good files have warnings',
+ "--fail-if-regression",
+ action="store_true",
+ help="Fail if known-good files have warnings",
)
parser.add_argument(
- '--fail-if-improved',
- action='store_true',
- help='Fail if new files with no nits are found',
+ "--fail-if-improved",
+ action="store_true",
+ help="Fail if new files with no nits are found",
)
parser.add_argument(
- '--fail-if-new-news-nit',
- metavar='threshold',
+ "--fail-if-new-news-nit",
+ metavar="threshold",
type=int,
- nargs='?',
+ nargs="?",
const=NEWS_NIT_THRESHOLD,
- help='Fail if new NEWS nit found before threshold line number',
+ help="Fail if new NEWS nit found before threshold line number",
)
args = parser.parse_args(argv)
if args.annotate_diff is not None and len(args.annotate_diff) > 2:
parser.error(
- '--annotate-diff takes between 0 and 2 ref args, not '
- f'{len(args.annotate_diff)} {tuple(args.annotate_diff)}'
+ "--annotate-diff takes between 0 and 2 ref args, not "
+ f"{len(args.annotate_diff)} {tuple(args.annotate_diff)}"
)
exit_code = 0
- wrong_directory_msg = 'Must run this script from the repo root'
- if not Path('Doc').exists() or not Path('Doc').is_dir():
+ wrong_directory_msg = "Must run this script from the repo root"
+ if not Path("Doc").exists() or not Path("Doc").is_dir():
raise RuntimeError(wrong_directory_msg)
- with Path('Doc/sphinx-warnings.txt').open(encoding='UTF-8') as f:
+ with Path("Doc/sphinx-warnings.txt").open(encoding="UTF-8") as f:
warnings = f.read().splitlines()
cwd = str(Path.cwd()) + os.path.sep
files_with_nits = {
- warning.removeprefix(cwd).split(':')[0]
+ warning.removeprefix(cwd).split(":")[0]
for warning in warnings
- if 'Doc/' in warning
+ if "Doc/" in warning
}
- with Path('Doc/tools/.nitignore').open(encoding='UTF-8') as clean_files:
+ with Path("Doc/tools/.nitignore").open(encoding="UTF-8") as clean_files:
files_with_expected_nits = {
filename.strip()
for filename in clean_files
- if filename.strip() and not filename.startswith('#')
+ if filename.strip() and not filename.startswith("#")
}
if args.annotate_diff is not None:
@@ -347,5 +347,5 @@ def main(argv: list[str] | None = None) -> int:
return exit_code
-if __name__ == '__main__':
+if __name__ == "__main__":
sys.exit(main())
diff --git a/Doc/tools/extensions/lexers/__init__.py b/Doc/tools/extensions/lexers/__init__.py
index 6b4746a762ced3..e12ac5be8139cc 100644
--- a/Doc/tools/extensions/lexers/__init__.py
+++ b/Doc/tools/extensions/lexers/__init__.py
@@ -4,12 +4,12 @@
def setup(app):
# Used for highlighting Parser/Python.asdl in library/ast.rst
- app.add_lexer('asdl', ASDLLexer)
+ app.add_lexer("asdl", ASDLLexer)
# Used for highlighting Grammar/python.gram in reference/grammar.rst
- app.add_lexer('peg', PEGLexer)
+ app.add_lexer("peg", PEGLexer)
return {
- 'version': '1.0',
- 'parallel_read_safe': True,
- 'parallel_write_safe': True,
+ "version": "1.0",
+ "parallel_read_safe": True,
+ "parallel_write_safe": True,
}
diff --git a/Doc/tools/extensions/lexers/asdl_lexer.py b/Doc/tools/extensions/lexers/asdl_lexer.py
index 9e937ac8c2e4df..3a74174a1f7dfb 100644
--- a/Doc/tools/extensions/lexers/asdl_lexer.py
+++ b/Doc/tools/extensions/lexers/asdl_lexer.py
@@ -3,40 +3,40 @@
class ASDLLexer(RegexLexer):
- name = 'ASDL'
- aliases = ['asdl']
- filenames = ['*.asdl']
- _name = r'([^\W\d]\w*)'
- _text_ws = r'(\s*)'
+ name = "ASDL"
+ aliases = ["asdl"]
+ filenames = ["*.asdl"]
+ _name = r"([^\W\d]\w*)"
+ _text_ws = r"(\s*)"
tokens = {
- 'ws': [
- (r'\n', Text),
- (r'\s+', Text),
- (r'--.*?$', Comment.Singleline),
+ "ws": [
+ (r"\n", Text),
+ (r"\s+", Text),
+ (r"--.*?$", Comment.Singleline),
],
- 'root': [
- include('ws'),
+ "root": [
+ include("ws"),
(
- r'(module)' + _text_ws + _name,
+ r"(module)" + _text_ws + _name,
bygroups(Keyword, Text, Name.Tag),
),
(
- r'(\w+)(\*\s|\?\s|\s)(\w+)',
+ r"(\w+)(\*\s|\?\s|\s)(\w+)",
bygroups(Name.Builtin.Pseudo, Operator, Name),
),
# Keep in line with ``builtin_types`` from Parser/asdl.py.
# ASDL's 4 builtin types are
# constant, identifier, int, string
- ('constant|identifier|int|string', Name.Builtin),
- (r'attributes', Name.Builtin),
+ ("constant|identifier|int|string", Name.Builtin),
+ (r"attributes", Name.Builtin),
(
- _name + _text_ws + '(=)',
+ _name + _text_ws + "(=)",
bygroups(Name, Text, Operator),
),
(_name, Name.Class),
- (r'\|', Operator),
- (r'{|}|\(|\)', Punctuation),
- (r'.', Text),
+ (r"\|", Operator),
+ (r"{|}|\(|\)", Punctuation),
+ (r".", Text),
],
}
diff --git a/Doc/tools/extensions/lexers/peg_lexer.py b/Doc/tools/extensions/lexers/peg_lexer.py
index 5c4bf1bd7385a4..827af205583f61 100644
--- a/Doc/tools/extensions/lexers/peg_lexer.py
+++ b/Doc/tools/extensions/lexers/peg_lexer.py
@@ -16,64 +16,64 @@ class PEGLexer(RegexLexer):
- Rules named `invalid_*` or `incorrect_*`
"""
- name = 'PEG'
- aliases = ['peg']
- filenames = ['*.gram']
- _name = r'([^\W\d]\w*)'
- _text_ws = r'(\s*)'
+ name = "PEG"
+ aliases = ["peg"]
+ filenames = ["*.gram"]
+ _name = r"([^\W\d]\w*)"
+ _text_ws = r"(\s*)"
tokens = {
- 'ws': [(r'\n', Text), (r'\s+', Text), (r'#.*$', Comment.Singleline),],
- 'lookaheads': [
+ "ws": [(r"\n", Text), (r"\s+", Text), (r"#.*$", Comment.Singleline),],
+ "lookaheads": [
# Forced tokens
- (r'(&&)(?=\w+\s?)', bygroups(None)),
+ (r"(&&)(?=\w+\s?)", bygroups(None)),
(r"(&&)(?='.+'\s?)", bygroups(None)),
(r'(&&)(?=".+"\s?)', bygroups(None)),
- (r'(&&)(?=\(.+\)\s?)', bygroups(None)),
+ (r"(&&)(?=\(.+\)\s?)", bygroups(None)),
- (r'(?<=\|\s)(&\w+\s?)', bygroups(None)),
+ (r"(?<=\|\s)(&\w+\s?)", bygroups(None)),
(r"(?<=\|\s)(&'.+'\s?)", bygroups(None)),
(r'(?<=\|\s)(&".+"\s?)', bygroups(None)),
- (r'(?<=\|\s)(&\(.+\)\s?)', bygroups(None)),
+ (r"(?<=\|\s)(&\(.+\)\s?)", bygroups(None)),
],
- 'metas': [
+ "metas": [
(r"(@\w+ '''(.|\n)+?''')", bygroups(None)),
- (r'^(@.*)$', bygroups(None)),
+ (r"^(@.*)$", bygroups(None)),
],
- 'actions': [
- (r'{(.|\n)+?}', bygroups(None)),
+ "actions": [
+ (r"{(.|\n)+?}", bygroups(None)),
],
- 'strings': [
+ "strings": [
(r"'\w+?'", Keyword),
(r'"\w+?"', Keyword),
(r"'\W+?'", Text),
(r'"\W+?"', Text),
],
- 'variables': [
- (_name + _text_ws + '(=)', bygroups(None, None, None),),
- (_name + _text_ws + r'(\[[\w\d_\*]+?\])' + _text_ws + '(=)', bygroups(None, None, None, None, None),),
+ "variables": [
+ (_name + _text_ws + "(=)", bygroups(None, None, None),),
+ (_name + _text_ws + r"(\[[\w\d_\*]+?\])" + _text_ws + "(=)", bygroups(None, None, None, None, None),),
],
- 'invalids': [
- (r'^(\s+\|\s+.*invalid_\w+.*\n)', bygroups(None)),
- (r'^(\s+\|\s+.*incorrect_\w+.*\n)', bygroups(None)),
- (r'^(#.*invalid syntax.*(?:.|\n)*)', bygroups(None),),
+ "invalids": [
+ (r"^(\s+\|\s+.*invalid_\w+.*\n)", bygroups(None)),
+ (r"^(\s+\|\s+.*incorrect_\w+.*\n)", bygroups(None)),
+ (r"^(#.*invalid syntax.*(?:.|\n)*)", bygroups(None),),
],
- 'root': [
- include('invalids'),
- include('ws'),
- include('lookaheads'),
- include('metas'),
- include('actions'),
- include('strings'),
- include('variables'),
- (r'\b(?!(NULL|EXTRA))([A-Z_]+)\b\s*(?!\()', Text,),
+ "root": [
+ include("invalids"),
+ include("ws"),
+ include("lookaheads"),
+ include("metas"),
+ include("actions"),
+ include("strings"),
+ include("variables"),
+ (r"\b(?!(NULL|EXTRA))([A-Z_]+)\b\s*(?!\()", Text,),
(
- r'^\s*' + _name + r'\s*' + r'(\[.*\])?' + r'\s*' + r'(\(.+\))?' + r'\s*(:)',
+ r"^\s*" + _name + r"\s*" + r"(\[.*\])?" + r"\s*" + r"(\(.+\))?" + r"\s*(:)",
bygroups(Name.Function, None, None, Punctuation),
),
(_name, Name.Function),
- (r'[\||\.|\+|\*|\?]', Operator),
- (r'{|}|\(|\)|\[|\]', Punctuation),
- (r'.', Text),
+ (r"[\||\.|\+|\*|\?]", Operator),
+ (r"{|}|\(|\)|\[|\]", Punctuation),
+ (r".", Text),
],
}
From 783145695f90efb6787011a0ca0fdc2a110e7deb Mon Sep 17 00:00:00 2001
From: Adam Turner <9087854+aa-turner@users.noreply.github.com>
Date: Fri, 19 Jul 2024 12:29:50 +0100
Subject: [PATCH 12/12] Preserve even more
---
Doc/conf.py | 40 ++++++++++++++++++++--------------------
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/Doc/conf.py b/Doc/conf.py
index ce8378bb21aed4..1e514b57843161 100644
--- a/Doc/conf.py
+++ b/Doc/conf.py
@@ -44,7 +44,7 @@
extensions.append('sphinxext.opengraph')
-doctest_global_setup = """
+doctest_global_setup = '''
try:
import _tkinter
except ImportError:
@@ -54,7 +54,7 @@
import warnings
warnings.simplefilter('error')
del warnings
-"""
+'''
manpages_url = 'https://manpages.debian.org/{path}'
@@ -325,8 +325,8 @@
'root_include_title': False, # We use the version switcher instead.
}
-if os.getenv('READTHEDOCS'):
- html_theme_options['hosted_on'] = (
+if os.getenv("READTHEDOCS"):
+ html_theme_options["hosted_on"] = (
'Read the Docs'
)
@@ -334,22 +334,22 @@
# https://github.com/python/cpython/issues/91207
if any('htmlhelp' in arg for arg in sys.argv):
html_style = 'pydoctheme.css'
- print('\nWARNING: Windows CHM Help is no longer supported.')
- print('It may be removed in the future\n')
+ print("\nWARNING: Windows CHM Help is no longer supported.")
+ print("It may be removed in the future\n")
# Short title used e.g. for HTML tags.
html_short_title = f'{release} Documentation'
# Deployment preview information
# (See .readthedocs.yml and https://docs.readthedocs.io/en/stable/reference/environment-variables.html)
-is_deployment_preview = os.getenv('READTHEDOCS_VERSION_TYPE') == 'external'
-repository_url = os.getenv('READTHEDOCS_GIT_CLONE_URL', '')
-repository_url = repository_url.removesuffix('.git')
+is_deployment_preview = os.getenv("READTHEDOCS_VERSION_TYPE") == "external"
+repository_url = os.getenv("READTHEDOCS_GIT_CLONE_URL", "")
+repository_url = repository_url.removesuffix(".git")
html_context = {
- 'is_deployment_preview': is_deployment_preview,
- 'repository_url': repository_url or None,
- 'pr_id': os.getenv('READTHEDOCS_VERSION'),
- 'enable_analytics': os.getenv('PYTHON_DOCS_ENABLE_ANALYTICS'),
+ "is_deployment_preview": is_deployment_preview,
+ "repository_url": repository_url or None,
+ "pr_id": os.getenv("READTHEDOCS_VERSION"),
+ "enable_analytics": os.getenv("PYTHON_DOCS_ENABLE_ANALYTICS"),
}
# This 'Last updated on:' timestamp is inserted at the bottom of every page.
@@ -397,7 +397,7 @@
latex_elements = {
# For the LaTeX preamble.
- 'preamble': r"""
+ 'preamble': r'''
\authoraddress{
\sphinxstrong{Python Software Foundation}\\
Email: \sphinxemail{docs@python.org}
@@ -405,7 +405,7 @@
\let\Verbatim=\OriginalVerbatim
\let\endVerbatim=\endOriginalVerbatim
\setcounter{tocdepth}{2}
-""",
+''',
# The paper size ('letter' or 'a4').
'papersize': 'a4',
# The font size ('10pt', '11pt' or '12pt').
@@ -469,7 +469,7 @@
(
'whatsnew/' + version,
'whatsnew.tex',
- "What's New in Python",
+ 'What\'s New in Python',
'A. M. Kuchling',
'howto',
),
@@ -582,10 +582,10 @@
# mapping unique short aliases to a base URL and a prefix.
# https://www.sphinx-doc.org/en/master/usage/extensions/extlinks.html
extlinks = {
- 'cve': ('https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-%s', 'CVE-%s'),
- 'cwe': ('https://cwe.mitre.org/data/definitions/%s.html', 'CWE-%s'),
- 'pypi': ('https://pypi.org/project/%s/', '%s'),
- 'source': (SOURCE_URI, '%s'),
+ "cve": ("https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-%s", "CVE-%s"),
+ "cwe": ("https://cwe.mitre.org/data/definitions/%s.html", "CWE-%s"),
+ "pypi": ("https://pypi.org/project/%s/", "%s"),
+ "source": (SOURCE_URI, "%s"),
}
extlinks_detect_hardcoded_links = True