diff --git a/src/rapids_pre_commit_hooks/codeowners.py b/src/rapids_pre_commit_hooks/codeowners.py
index 4fcc3a2..a1c01ba 100644
--- a/src/rapids_pre_commit_hooks/codeowners.py
+++ b/src/rapids_pre_commit_hooks/codeowners.py
@@ -1,12 +1,15 @@
-# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION.
+# SPDX-FileCopyrightText: Copyright (c) 2025-2026, NVIDIA CORPORATION.
 # SPDX-License-Identifier: Apache-2.0
 
 import argparse
 import dataclasses
 import re
-from typing import Protocol
+from typing import Protocol, TYPE_CHECKING
 
-from rapids_pre_commit_hooks.lint import Linter, LintMain, LintWarning
+from .lint import Linter, LintMain
+
+if TYPE_CHECKING:
+    from .lint import LintWarning, Span
 
 CODEOWNERS_OWNER_RE_STR = r"([^\n#\s\\]|\\[^\n])+"
 CODEOWNERS_OWNER_RE = re.compile(rf"\s+(?P<owner>{CODEOWNERS_OWNER_RE_STR})")
@@ -18,14 +21,14 @@
 @dataclasses.dataclass
 class FilePattern:
     filename: str
-    pos: tuple[int, int]
+    span: "Span"
 
 
 @dataclasses.dataclass
 class Owner:
     owner: str
-    pos: tuple[int, int]
-    pos_with_leading_whitespace: tuple[int, int]
+    span: "Span"
+    span_with_leading_whitespace: "Span"
 
 
 @dataclasses.dataclass
@@ -136,7 +139,7 @@ def parse_codeowners_line(line: str, skip: int) -> CodeownersLine | None:
 
     file_pattern = FilePattern(
         filename=line_match.group("file"),
-        pos=(
+        span=(
             line_match.span("file")[0] + skip,
             line_match.span("file")[1] + skip,
         ),
@@ -152,8 +155,8 @@ def parse_codeowners_line(line: str, skip: int) -> CodeownersLine | None:
         owners.append(
             Owner(
                 owner=owner_match.group("owner"),
-                pos=(start + line_skip, end + line_skip),
-                pos_with_leading_whitespace=(
+                span=(start + line_skip, end + line_skip),
+                span_with_leading_whitespace=(
                     whitespace_start + line_skip,
                     end + line_skip,
                 ),
@@ -167,7 +170,7 @@ def check_codeowners_line(
     linter: Linter,
     args: argparse.Namespace,
     codeowners_line: CodeownersLine,
-    found_files: list[tuple[RequiredCodeownersLine, tuple[int, int]]],
+    found_files: list[tuple[RequiredCodeownersLine, "Span"]],
 ) -> None:
     for required_codeowners_line in required_codeowners_lines(args):
         if required_codeowners_line.file == codeowners_line.file.filename:
@@ -186,13 +189,13 @@ def check_codeowners_line(
             ]
             if extraneous_owners:
                 warning = linter.add_warning(
-                    codeowners_line.file.pos,
+                    codeowners_line.file.span,
                     f"file '{codeowners_line.file.filename}' has "
                     "incorrect owners",
                 )
                 for owner in extraneous_owners:
                     warning.add_replacement(
-                        owner.pos_with_leading_whitespace, ""
+                        owner.span_with_leading_whitespace, ""
                    )
 
             missing_required_owners: list[str] = []
@@ -205,34 +208,34 @@ def check_codeowners_line(
             if missing_required_owners:
                 if not warning:
                     warning = linter.add_warning(
-                        codeowners_line.file.pos,
+                        codeowners_line.file.span,
                         f"file '{codeowners_line.file.filename}' has "
                         "incorrect owners",
                     )
                 extra_string = " " + " ".join(missing_required_owners)
-                last = codeowners_line.owners[-1].pos[1]
+                last = codeowners_line.owners[-1].span[1]
                 warning.add_replacement((last, last), extra_string)
 
-            for found_file, found_pos in found_files:
+            for found_file, found_span in found_files:
                 if codeowners_line.file.filename in found_file.after:
                     linter.add_warning(
-                        found_pos,
+                        found_span,
                         f"file '{found_file.file}' should come after "
                         f"'{codeowners_line.file.filename}'",
                     ).add_note(
-                        codeowners_line.file.pos,
+                        codeowners_line.file.span,
                         f"file '{codeowners_line.file.filename}' is here",
                     )
 
             found_files.append(
-                (required_codeowners_line, codeowners_line.file.pos)
+                (required_codeowners_line, codeowners_line.file.span)
             )
             break
 
 
 def check_codeowners(linter: Linter,
args: argparse.Namespace) -> None: - found_files: list[tuple[RequiredCodeownersLine, tuple[int, int]]] = [] - for begin, end in linter.lines.pos: + found_files: list[tuple[RequiredCodeownersLine, "Span"]] = [] + for begin, end in linter.lines.spans: line = linter.content[begin:end] codeowners_line = parse_codeowners_line(line, begin) if codeowners_line: diff --git a/src/rapids_pre_commit_hooks/copyright.py b/src/rapids_pre_commit_hooks/copyright.py index 454c10f..126b6a0 100644 --- a/src/rapids_pre_commit_hooks/copyright.py +++ b/src/rapids_pre_commit_hooks/copyright.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024-2025, NVIDIA CORPORATION. +# SPDX-FileCopyrightText: Copyright (c) 2024-2026, NVIDIA CORPORATION. # SPDX-License-Identifier: Apache-2.0 import dataclasses @@ -21,7 +21,7 @@ from collections.abc import Callable, Generator, Iterable from typing import Optional - from .lint import LintWarning + from .lint import LintWarning, Span SPDX_COPYRIGHT_RE: re.Pattern = re.compile( @@ -86,21 +86,18 @@ } -_PosType = tuple[int, int] - - @dataclasses.dataclass class CopyrightMatch: - span: _PosType - spdx_filecopyrighttext_tag_span: _PosType | None - full_copyright_text_span: _PosType - nvidia_copyright_text_span: _PosType - years_span: _PosType - first_year_span: _PosType - last_year_span: _PosType | None - spdx_license_identifier_tag_span: _PosType | None - spdx_license_identifier_text_span: _PosType | None - long_form_text_span: _PosType | None = None + span: "Span" + spdx_filecopyrighttext_tag_span: "Span | None" + full_copyright_text_span: "Span" + nvidia_copyright_text_span: "Span" + years_span: "Span" + first_year_span: "Span" + last_year_span: "Span | None" + spdx_license_identifier_tag_span: "Span | None" + spdx_license_identifier_text_span: "Span | None" + long_form_text_span: "Span | None" = None class NoTargetBranchWarning(RuntimeWarning): @@ -126,7 +123,7 @@ def match_copyright( ) -> CopyrightMatch | None: if re_match := SPDX_COPYRIGHT_RE.search(lines.content, start): - def optional_match(name: str) -> _PosType | None: + def optional_match(name: str) -> "Span | None": try: return ( span if (span := re_match.span(name)) != (-1, -1) else None @@ -157,7 +154,7 @@ def optional_match(name: str) -> _PosType | None: except IndexError: license_identifier = None - if pos := find_long_form_text( + if span := find_long_form_text( lines, filename, license_identifier, @@ -167,8 +164,8 @@ def optional_match(name: str) -> _PosType | None: or match.full_copyright_text_span )[0], ): - match.long_form_text_span = pos - match.span = (match.span[0], pos[1]) + match.long_form_text_span = span + match.span = (match.span[0], span[1]) return match @@ -178,21 +175,21 @@ def optional_match(name: str) -> _PosType | None: def match_all_copyright( lines: Lines, filename: str | os.PathLike[str], - boundaries: list[tuple[_PosType, bool]], + boundaries: list[tuple["Span", bool]], ) -> "Generator[CopyrightMatch]": start = 0 while match := match_copyright(lines, filename, start): - if Linter.is_warning_range_enabled(boundaries, match.span): + if Linter.is_warning_span_enabled(boundaries, match.span): yield match start = match.span[1] def compute_prefix( - lines: Lines, filename: str | os.PathLike[str], index: int + lines: Lines, filename: str | os.PathLike[str], pos: int ) -> str: - line = lines.line_for_pos(index) - prefix = lines.content[lines.pos[line][0] : index] + line = lines.line_for_pos(pos) + prefix = lines.content[lines.spans[line][0] : pos] if C_STYLE_COMMENTS_RE.search(str(filename)): 
prefix = prefix.replace("/*", " *") return prefix @@ -202,11 +199,11 @@ def find_long_form_text( lines: Lines, filename: str | os.PathLike[str], identifier: str | None, - index: int, -) -> _PosType | None: - prefix = compute_prefix(lines, filename, index) - line = lines.line_for_pos(index) - rest_of_lines = lines.pos[line + 1 :] + pos: int, +) -> "Span | None": + prefix = compute_prefix(lines, filename, pos) + line = lines.line_for_pos(pos) + rest_of_lines = lines.spans[line + 1 :] licenses: Iterable[list[str]] = LONG_FORM_LICENSE_TEXT.values() if identifier: @@ -217,7 +214,7 @@ def find_long_form_text( def license_levenshtein_distance( license_text: str, - ) -> tuple[_PosType, int] | None: + ) -> tuple["Span", int] | None: """Do a line-by-line Levenshtein comparison between the license and \ the text. @@ -238,8 +235,8 @@ def license_levenshtein_distance( first_line: str | None = None actual_license_lines: list[str] = [] score = 0 - for license_line, file_pos in zip(license_lines, rest_of_lines): - file_line = lines.content[slice(*file_pos)] + for license_line, file_span in zip(license_lines, rest_of_lines): + file_line = lines.content[slice(*file_span)] if first_line is None: first_line = file_line if not file_line.startswith(prefix) and not prefix.startswith( @@ -255,13 +252,13 @@ def license_levenshtein_distance( assert first_line is not None return ( ( - lines.pos[line + 1][0] + min(len(prefix), len(first_line)), - lines.pos[line + len(actual_license_lines)][1], + lines.spans[line + 1][0] + min(len(prefix), len(first_line)), + lines.spans[line + len(actual_license_lines)][1], ), score, ) - scores: itertools.chain[tuple[_PosType, int]] = itertools.chain( + scores: itertools.chain[tuple["Span", int]] = itertools.chain( ( score_tuple for license_texts in licenses @@ -281,9 +278,9 @@ def license_levenshtein_distance( is not None ), ) - pos, score = min(scores, key=lambda score: score[1], default=(None, None)) + span, score = min(scores, key=lambda score: score[1], default=(None, None)) if score is not None: - return pos + return span return None @@ -309,7 +306,7 @@ def has_cmake_format_off_comment( return ( previous_line >= 0 and "cmake-format: off" - in linter.content[slice(*linter.lines.pos[previous_line])] + in linter.content[slice(*linter.lines.spans[previous_line])] ) @@ -318,9 +315,9 @@ def has_cmake_format_on_comment( ) -> bool: next_line = linter.lines.line_for_pos(match.span[1]) + 1 return ( - next_line < len(linter.lines.pos) + next_line < len(linter.lines.spans) and "cmake-format: on" - in linter.content[slice(*linter.lines.pos[next_line])] + in linter.content[slice(*linter.lines.spans[next_line])] ) @@ -363,11 +360,11 @@ def apply_copyright_revert( old_content[slice(*old_match.years_span)] == linter.content[slice(*new_match.years_span)] ): - warning_pos = new_match.full_copyright_text_span + warning_span = new_match.full_copyright_text_span else: - warning_pos = new_match.years_span + warning_span = new_match.years_span w = linter.add_warning( - warning_pos, + warning_span, "copyright is not out of date and should not be updated", ) w.add_replacement( @@ -424,7 +421,7 @@ def apply_spdx_filecopyrighttext_tag_insert( ) if add_cmake_format_off: w.add_note( - linter.lines.pos[ + linter.lines.spans[ linter.lines.line_for_pos(match.full_copyright_text_span[0]) ], "no cmake-format: off comment before copyright notice", @@ -460,7 +457,7 @@ def apply_spdx_license_insert( if cmake and not match.long_form_text_span: add_cmake_format_on = not has_cmake_format_on_comment(linter, match) - 
next_line_start_pos = linter.lines.pos[line][1] + next_line_start_pos = linter.lines.spans[line][1] w = linter.add_warning( (match_start_pos, match.full_copyright_text_span[1]), "no SPDX-License-Identifier header found", @@ -477,7 +474,7 @@ def apply_spdx_license_insert( ) if add_cmake_format_on: w.add_note( - linter.lines.pos[line], + linter.lines.spans[line], "no cmake-format: on comment after copyright notice", ) @@ -514,24 +511,26 @@ def apply_spdx_long_form_text_removal( def apply_cmake_format_off_insert( linter: "Linter", match: CopyrightMatch ) -> None: - line = linter.lines.pos[linter.lines.line_for_pos(match.span[0])] + line_span = linter.lines.spans[linter.lines.line_for_pos(match.span[0])] w = linter.add_warning( - line, "no cmake-format: off comment before copyright notice" + line_span, "no cmake-format: off comment before copyright notice" ) w.add_replacement( - (line[0], line[0]), f"# cmake-format: off{linter.lines.newline_style}" + (line_span[0], line_span[0]), + f"# cmake-format: off{linter.lines.newline_style}", ) def apply_cmake_format_on_insert( linter: "Linter", match: CopyrightMatch ) -> None: - line = linter.lines.pos[linter.lines.line_for_pos(match.span[1])] + line_span = linter.lines.spans[linter.lines.line_for_pos(match.span[1])] w = linter.add_warning( - line, "no cmake-format: on comment after copyright notice" + line_span, "no cmake-format: on comment after copyright notice" ) w.add_replacement( - (line[1], line[1]), f"{linter.lines.newline_style}# cmake-format: on" + (line_span[1], line_span[1]), + f"{linter.lines.newline_style}# cmake-format: on", ) @@ -616,7 +615,7 @@ def apply_copyright_insert( pos = 0 if linter.content.startswith("#!"): - pos = linter.lines.pos[1][0] + pos = linter.lines.spans[1][0] extra_newline = ( "" @@ -689,7 +688,9 @@ def apply_copyright_check( ) ) - def match_year_sort(match: CopyrightMatch) -> tuple[int, int]: + def match_year_sort( + match: CopyrightMatch, + ) -> tuple[int, int]: # not a Span return ( int( linter.content[ diff --git a/src/rapids_pre_commit_hooks/hardcoded_version.py b/src/rapids_pre_commit_hooks/hardcoded_version.py index 367f4b2..3fd9c83 100644 --- a/src/rapids_pre_commit_hooks/hardcoded_version.py +++ b/src/rapids_pre_commit_hooks/hardcoded_version.py @@ -9,14 +9,14 @@ import tomlkit.exceptions from .lint import LintMain, Linter -from .utils.toml import find_value_location +from .utils.toml import find_value_span if TYPE_CHECKING: import argparse import os from collections.abc import Generator - from .lint import Lines + from .lint import Lines, Span # Matches any 2-part or 3-part numeric version strings, and stores the # components in named capture groups: @@ -41,50 +41,50 @@ ) -def get_excluded_section_pyproject_toml( +def get_excluded_span_pyproject_toml( document: tomlkit.TOMLDocument, path: tuple[str, ...] 
-) -> "Generator[tuple[int, int]]": +) -> "Generator[Span]": try: - yield find_value_location(document, path, append=False) + yield find_value_span(document, path, append=False) except tomlkit.exceptions.NonExistentKey: pass -def get_excluded_sections_pyproject_toml( +def get_excluded_spans_pyproject_toml( linter: Linter, -) -> "Generator[tuple[int, int]]": +) -> "Generator[Span]": document = tomlkit.loads(linter.content) - yield from get_excluded_section_pyproject_toml( + yield from get_excluded_span_pyproject_toml( document, ("project", "dependencies") ) - yield from get_excluded_section_pyproject_toml( + yield from get_excluded_span_pyproject_toml( document, ("project", "optional-dependencies") ) - yield from get_excluded_section_pyproject_toml( + yield from get_excluded_span_pyproject_toml( document, ("build-system", "requires") ) - yield from get_excluded_section_pyproject_toml( + yield from get_excluded_span_pyproject_toml( document, ("tool", "rapids-build-backend", "requires") ) -def get_excluded_sections(linter: Linter) -> "Generator[tuple[int, int]]": +def get_excluded_spans(linter: Linter) -> "Generator[Span]": if PYPROJECT_TOML_RE.search(linter.filename): - yield from get_excluded_sections_pyproject_toml(linter) + yield from get_excluded_spans_pyproject_toml(linter) def is_deprecation_notice(lines: "Lines", match: "re.Match[str]") -> bool: this_line = lines.line_for_pos(match.start("full")) first_line = max(0, this_line - 3) - start = lines.pos[first_line][0] - end = lines.pos[this_line][1] + start = lines.spans[first_line][0] + end = lines.spans[this_line][1] return bool(DEPRECATED_RE.search(lines.content[start:end])) def is_number_array(lines: "Lines", match: "re.Match[str]") -> bool: this_line = lines.line_for_pos(match.start("full")) - start, end = lines.pos[this_line] + start, end = lines.spans[this_line] return bool(NUMBER_ARRAY_RE.search(lines.content[start:end])) @@ -153,16 +153,14 @@ def check_hardcoded_version( return full_version = read_version_file(args.version_file) - excluded_sections = sorted(get_excluded_sections(linter)) + excluded_spans = sorted(get_excluded_spans(linter)) for match in find_hardcoded_versions(linter.content, full_version): - section_index = bisect.bisect_right( - excluded_sections, match.span("full") - ) - if section_index > 0: - section_start, section_end = excluded_sections[section_index - 1] + span_index = bisect.bisect_right(excluded_spans, match.span("full")) + if span_index > 0: + span_start, span_end = excluded_spans[span_index - 1] if ( - match.start("full") >= section_start - and match.end("full") <= section_end + match.start("full") >= span_start + and match.end("full") <= span_end ): continue diff --git a/src/rapids_pre_commit_hooks/lint.py b/src/rapids_pre_commit_hooks/lint.py index 244864e..602b1b6 100644 --- a/src/rapids_pre_commit_hooks/lint.py +++ b/src/rapids_pre_commit_hooks/lint.py @@ -17,7 +17,7 @@ if TYPE_CHECKING: from collections.abc import Callable, Generator, Iterator -_PosType = tuple[int, int] +Span = tuple[int, int] class OverlappingReplacementsError(RuntimeError): @@ -30,53 +30,53 @@ class BinaryFileWarning(Warning): @dataclasses.dataclass class Replacement: - pos: _PosType + span: Span newtext: str @dataclasses.dataclass class Note: - pos: _PosType + span: Span msg: str @dataclasses.dataclass class LintWarning: - pos: _PosType + span: Span msg: str replacements: list[Replacement] = dataclasses.field( default_factory=list, kw_only=True ) notes: list[Note] = dataclasses.field(default_factory=list, kw_only=True) - def 
add_replacement(self, pos: _PosType, newtext: str) -> None: - self.replacements.append(Replacement(pos, newtext)) + def add_replacement(self, span: Span, newtext: str) -> None: + self.replacements.append(Replacement(span, newtext)) - def add_note(self, pos: _PosType, msg: str) -> None: - self.notes.append(Note(pos, msg)) + def add_note(self, span: Span, msg: str) -> None: + self.notes.append(Note(span, msg)) class Lines: @functools.total_ordering class _LineComparator: - def __init__(self, pos: _PosType) -> None: - self.pos: _PosType = pos + def __init__(self, span: Span) -> None: + self.span: Span = span def __lt__(self, other: object) -> bool: assert isinstance(other, int) - return self.pos[1] < other + return self.span[1] < other def __gt__(self, other: object) -> bool: assert isinstance(other, int) - return self.pos[0] > other + return self.span[0] > other def __eq__(self, other: object) -> bool: assert isinstance(other, int) - return self.pos[0] <= other <= self.pos[1] + return self.span[0] <= other <= self.span[1] def __init__(self, content: str) -> None: self.content: str = content - self.pos: list[_PosType] = [] + self.spans: list[Span] = [] line_begin = 0 line_end = 0 @@ -91,18 +91,18 @@ def __init__(self, content: str) -> None: for c in content: if state == "c": if c == "\r": - self.pos.append((line_begin, line_end)) + self.spans.append((line_begin, line_end)) line_end = line_begin = line_end + 1 state = "r" elif c == "\n": - self.pos.append((line_begin, line_end)) + self.spans.append((line_begin, line_end)) line_end = line_begin = line_end + 1 self.newline_count["\n"] += 1 else: line_end += 1 elif state == "r": if c == "\r": - self.pos.append((line_begin, line_end)) + self.spans.append((line_begin, line_end)) line_end = line_begin = line_end + 1 self.newline_count["\r"] += 1 elif c == "\n": @@ -114,7 +114,7 @@ def __init__(self, content: str) -> None: state = "c" self.newline_count["\r"] += 1 - self.pos.append((line_begin, line_end)) + self.spans.append((line_begin, line_end)) if state == "r": self.newline_count["\r"] += 1 self.newline_style, _ = max( @@ -122,17 +122,17 @@ def __init__(self, content: str) -> None: key=lambda item: item[1], ) - def line_for_pos(self, index: int) -> int: - line_index = bisect.bisect_left( - [Lines._LineComparator(line) for line in self.pos], index + def line_for_pos(self, pos: int) -> int: + line_span_index = bisect.bisect_left( + [Lines._LineComparator(line) for line in self.spans], pos ) try: - line_pos = self.pos[line_index] + line_span = self.spans[line_span_index] except IndexError: - raise IndexError(f"Position {index} is not in the string") - if not (line_pos[0] <= index <= line_pos[1]): - raise IndexError(f"Position {index} is inside a line separator") - return line_index + raise IndexError(f"Position {pos} is not in the string") + if not (line_span[0] <= pos <= line_span[1]): + raise IndexError(f"Position {pos} is inside a line separator") + return line_span_index class Linter: @@ -155,8 +155,8 @@ def __init__(self, filename: str, content: str, warning_name: str) -> None: Linter.get_disabled_enabled_boundaries(self.lines, warning_name) ) - def add_warning(self, pos: _PosType, msg: str) -> LintWarning: - w = LintWarning(pos, msg) + def add_warning(self, span: Span, msg: str) -> LintWarning: + w = LintWarning(span, msg) self.warnings.append(w) return w @@ -167,19 +167,19 @@ def fix(self) -> str: for warning in self.get_enabled_warnings() for replacement in warning.replacements ), - key=lambda replacement: replacement.pos, + key=lambda 
replacement: replacement.span, ) for r1, r2 in pairwise(sorted_replacements): - if r1.pos[1] > r2.pos[0]: + if r1.span[1] > r2.span[0]: raise OverlappingReplacementsError(f"{r1} overlaps with {r2}") cursor = 0 replaced_content = "" for replacement in sorted_replacements: - replaced_content += self.content[cursor : replacement.pos[0]] + replaced_content += self.content[cursor : replacement.span[0]] replaced_content += replacement.newtext - cursor = replacement.pos[1] + cursor = replacement.span[1] replaced_content += self.content[cursor:] return replaced_content @@ -187,42 +187,42 @@ def fix(self) -> str: def _print_note( self, note_type: str, - pos: _PosType, + span: Span, msg: str, newtext: str | None = None, ) -> None: - line_index = self.lines.line_for_pos(pos[0]) - line_pos = self.lines.pos[line_index] + line_index = self.lines.line_for_pos(span[0]) + line_span = self.lines.spans[line_index] self.console.print( f"In file [bold]{escape(self.filename)}:{line_index + 1}:" - f"{pos[0] - line_pos[0] + 1}[/bold]:" + f"{span[0] - line_span[0] + 1}[/bold]:" ) - self._print_highlighted_code(pos, newtext) + self._print_highlighted_code(span, newtext) self.console.print(f"[bold]{note_type}:[/bold] {escape(msg)}") self.console.print() def print_warnings(self, fix_applied: bool = False) -> None: sorted_warnings = sorted( self.get_enabled_warnings(), - key=lambda warning: warning.pos, + key=lambda warning: warning.span, ) for warning in sorted_warnings: - self._print_note("warning", warning.pos, warning.msg) + self._print_note("warning", warning.span, warning.msg) for note in warning.notes: - self._print_note("note", note.pos, note.msg) + self._print_note("note", note.span, note.msg) for replacement in warning.replacements: - line_index = self.lines.line_for_pos(replacement.pos[0]) - line_pos = self.lines.pos[line_index] + line_span_index = self.lines.line_for_pos(replacement.span[0]) + line_span = self.lines.spans[line_span_index] newtext = replacement.newtext if match := self._NEWLINE_RE.search(newtext): newtext = newtext[: match.start()] long = True else: long = False - if replacement.pos[1] > line_pos[1]: + if replacement.span[1] > line_span[1]: long = True if fix_applied: @@ -241,37 +241,37 @@ def print_warnings(self, fix_applied: bool = False) -> None: else: replacement_msg = "suggested fix" self._print_note( - "note", replacement.pos, replacement_msg, newtext + "note", replacement.span, replacement_msg, newtext ) def _print_highlighted_code( - self, pos: _PosType, replacement: str | None = None + self, span: Span, replacement: str | None = None ) -> None: - line_index = self.lines.line_for_pos(pos[0]) - line_pos = self.lines.pos[line_index] - left = pos[0] + line_span_index = self.lines.line_for_pos(span[0]) + line_span = self.lines.spans[line_span_index] + left = span[0] - if self.lines.line_for_pos(pos[1]) == line_index: - right = pos[1] + if self.lines.line_for_pos(span[1]) == line_span_index: + right = span[1] else: - right = line_pos[1] + right = line_span[1] if replacement is None: self.console.print( - f" {escape(self.content[line_pos[0] : left])}" + f" {escape(self.content[line_span[0] : left])}" f"[bold]{escape(self.content[left:right])}[/bold]" - f"{escape(self.content[right : line_pos[1]])}" + f"{escape(self.content[right : line_span[1]])}" ) else: self.console.print( - f"[red]-{escape(self.content[line_pos[0] : left])}" + f"[red]-{escape(self.content[line_span[0] : left])}" f"[bold]{escape(self.content[left:right])}[/bold]" - f"{escape(self.content[right : line_pos[1]])}[/red]" + 
f"{escape(self.content[right : line_span[1]])}[/red]" ) self.console.print( - f"[green]+{escape(self.content[line_pos[0] : left])}" + f"[green]+{escape(self.content[line_span[0] : left])}" f"[bold]{escape(replacement)}[/bold]" - f"{escape(self.content[right : line_pos[1]])}[/green]" + f"{escape(self.content[right : line_span[1]])}[/green]" ) @classmethod @@ -279,30 +279,30 @@ def get_disabled_enabled_boundaries( cls, lines: Lines, warning_name: str, - ) -> "list[tuple[_PosType, bool]]": - def helper() -> "Generator[tuple[_PosType, bool]]": + ) -> "list[tuple[Span, bool]]": + def helper() -> "Generator[tuple[Span, bool]]": start = 0 enabled = True - next_line_directives: list[tuple[_PosType, bool]] = [] + next_line_directives: list[tuple[Span, bool]] = [] - def handle_end(end: int) -> "Generator[tuple[_PosType, bool]]": + def handle_end(end: int) -> "Generator[tuple[Span, bool]]": nonlocal start while True: try: - next_line_pos, next_line_enabled = ( + next_line_span, next_line_enabled = ( next_line_directives.pop(0) ) except IndexError: break - if next_line_pos[0] >= end: + if next_line_span[0] >= end: next_line_directives.insert( - 0, (next_line_pos, next_line_enabled) + 0, (next_line_span, next_line_enabled) ) break - if next_line_pos[0] >= start: - yield ((start, next_line_pos[0]), enabled) - yield (next_line_pos, next_line_enabled) - start = next_line_pos[1] + if next_line_span[0] >= start: + yield ((start, next_line_span[0]), enabled) + yield (next_line_span, next_line_enabled) + start = next_line_span[1] if start <= end: yield ((start, end), enabled) @@ -322,15 +322,16 @@ def handle_end(end: int) -> "Generator[tuple[_PosType, bool]]": next_line = this_line + 1 if ( next_line_directives - and next_line_directives[-1][0] == lines.pos[next_line] + and next_line_directives[-1][0] + == lines.spans[next_line] ): next_line_directives[-1] = ( - lines.pos[next_line], + lines.spans[next_line], directive_is_enable, ) else: next_line_directives.append( - (lines.pos[next_line], directive_is_enable) + (lines.spans[next_line], directive_is_enable) ) else: yield from handle_end(m.start()) @@ -342,34 +343,34 @@ def handle_end(end: int) -> "Generator[tuple[_PosType, bool]]": return list(helper()) @classmethod - def is_warning_range_enabled( - cls, boundaries: list[tuple[_PosType, bool]], warning_range: _PosType + def is_warning_span_enabled( + cls, boundaries: list[tuple[Span, bool]], warning_span: Span ) -> bool: start = bisect.bisect_left( boundaries, - warning_range[0], + warning_span[0], key=lambda b: b[0][0], ) end = bisect.bisect_right( boundaries, - warning_range[1], + warning_span[1], key=lambda b: b[0][1], ) - if warning_range[0] == warning_range[1]: + if warning_span[0] == warning_span[1]: move_start = ( - start > 0 and warning_range[0] <= boundaries[start - 1][0][1] + start > 0 and warning_span[0] <= boundaries[start - 1][0][1] ) move_end = ( end < len(boundaries) - and warning_range[1] >= boundaries[end][0][0] + and warning_span[1] >= boundaries[end][0][0] ) else: move_start = ( - start > 0 and warning_range[0] < boundaries[start - 1][0][1] + start > 0 and warning_span[0] < boundaries[start - 1][0][1] ) move_end = ( end < len(boundaries) - and warning_range[1] > boundaries[end][0][0] + and warning_span[1] > boundaries[end][0][0] ) if move_start: start -= 1 @@ -379,8 +380,8 @@ def is_warning_range_enabled( def get_enabled_warnings(self) -> "Iterator[LintWarning]": return filter( - lambda w: Linter.is_warning_range_enabled( - self.disabled_enabled_boundaries, w.pos + lambda w: 
Linter.is_warning_span_enabled( + self.disabled_enabled_boundaries, w.span ), self.warnings, ) diff --git a/src/rapids_pre_commit_hooks/pyproject_license.py b/src/rapids_pre_commit_hooks/pyproject_license.py index f63595e..a8f3e97 100644 --- a/src/rapids_pre_commit_hooks/pyproject_license.py +++ b/src/rapids_pre_commit_hooks/pyproject_license.py @@ -8,7 +8,7 @@ import tomlkit.exceptions from .lint import Linter, LintMain -from .utils.toml import find_value_location +from .utils.toml import find_value_span RAPIDS_LICENSE: str = "Apache-2.0" ACCEPTABLE_LICENSES: set[str] = { @@ -35,9 +35,9 @@ def check_pyproject_license(linter: Linter, _args: argparse.Namespace) -> None: # the replacement / appending code, and also enforces a bit more # standardization in pyproject.toml files (a good thing on its own!). if isinstance(project_table, tomlkit.container.OutOfOrderTableProxy): - loc = (len(linter.content), len(linter.content)) + span = (len(linter.content), len(linter.content)) linter.add_warning( - loc, + span, ( "[project] table should precede all other [project.*] " "tables and all [project.*] tables should be grouped " @@ -50,21 +50,21 @@ def check_pyproject_license(linter: Linter, _args: argparse.Namespace) -> None: license_value = project_table["license"] # type: ignore[index] except tomlkit.exceptions.NonExistentKey: if add_project_table: - loc = (len(linter.content), len(linter.content)) + span = (len(linter.content), len(linter.content)) linter.add_warning( - loc, f'add project.license with value "{RAPIDS_LICENSE}"' + span, f'add project.license with value "{RAPIDS_LICENSE}"' ).add_replacement( - loc, + span, f"[project]{linter.lines.newline_style}license = " f"{tomlkit.string(RAPIDS_LICENSE).as_string()}" + linter.lines.newline_style, ) else: - loc = find_value_location(document, ("project",), append=True) + span = find_value_span(document, ("project",), append=True) linter.add_warning( - loc, f'add project.license with value "{RAPIDS_LICENSE}"' + span, f'add project.license with value "{RAPIDS_LICENSE}"' ).add_replacement( - loc, + span, f"license = {tomlkit.string(RAPIDS_LICENSE).as_string()}" + linter.lines.newline_style, ) @@ -73,49 +73,45 @@ def check_pyproject_license(linter: Linter, _args: argparse.Namespace) -> None: # handle case where the license is still in # "license = { text = 'something' }" form if isinstance(license_value, tomlkit.items.InlineTable): - loc = find_value_location( - document, ("project", "license"), append=False - ) + span = find_value_span(document, ("project", "license"), append=False) if license_value := document["project"]["license"].get("text", None): # type: ignore[index, union-attr] slugified_license_value = re.sub( r"\s+", "-", str(license_value).strip() ) if slugified_license_value in ACCEPTABLE_LICENSES: linter.add_warning( - loc, f'license should be "{slugified_license_value}"' + span, f'license should be "{slugified_license_value}"' ).add_replacement( - loc, + span, f"{tomlkit.string(slugified_license_value).as_string()}", ) else: linter.add_warning( - loc, + span, f'license should be "{RAPIDS_LICENSE}"' + f', got license = {{ text = "{license_value}" }}', ) else: - linter.add_warning(loc, f'license should be "{RAPIDS_LICENSE}"') + linter.add_warning(span, f'license should be "{RAPIDS_LICENSE}"') return if license_value not in ACCEPTABLE_LICENSES: - loc = find_value_location( - document, ("project", "license"), append=False - ) + span = find_value_span(document, ("project", "license"), append=False) slugified_license_value = re.sub( r"\s+", "-", 
str(license_value).strip() ) if slugified_license_value in ACCEPTABLE_LICENSES: linter.add_warning( - loc, + span, f'license should be "{slugified_license_value}"' + f', got "{license_value}"', ).add_replacement( - loc, + span, f"{tomlkit.string(slugified_license_value).as_string()}", ) return - linter.add_warning(loc, f'license should be "{RAPIDS_LICENSE}"') + linter.add_warning(span, f'license should be "{RAPIDS_LICENSE}"') def main() -> None: diff --git a/src/rapids_pre_commit_hooks/shell/__init__.py b/src/rapids_pre_commit_hooks/shell/__init__.py index 75a70bd..4e0b2ca 100644 --- a/src/rapids_pre_commit_hooks/shell/__init__.py +++ b/src/rapids_pre_commit_hooks/shell/__init__.py @@ -1,22 +1,24 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024-2025, NVIDIA CORPORATION. +# SPDX-FileCopyrightText: Copyright (c) 2024-2026, NVIDIA CORPORATION. # SPDX-License-Identifier: Apache-2.0 import argparse +from typing import TYPE_CHECKING import bashlex -from ..lint import ExecutionContext, Linter, LintMain, LintWarning +from ..lint import ExecutionContext, LintMain -_PosType = tuple[int, int] +if TYPE_CHECKING: + from ..lint import Linter, LintWarning, Span class LintVisitor(bashlex.ast.nodevisitor): - def __init__(self, linter: Linter, args: argparse.Namespace) -> None: - self.linter: Linter = linter + def __init__(self, linter: "Linter", args: argparse.Namespace) -> None: + self.linter: "Linter" = linter self.args: argparse.Namespace = args - def add_warning(self, pos: _PosType, msg: str) -> LintWarning: - return self.linter.add_warning(pos, msg) + def add_warning(self, span: "Span", msg: str) -> "LintWarning": + return self.linter.add_warning(span, msg) class ShellExecutionContext(ExecutionContext): @@ -28,7 +30,7 @@ def __init__(self, warning_name: str, args: argparse.Namespace) -> None: def add_visitor_class(self, cls: type) -> None: self.visitors.append(cls) - def check_shell(self, linter: Linter, args: argparse.Namespace) -> None: + def check_shell(self, linter: "Linter", args: argparse.Namespace) -> None: parts = bashlex.parse(linter.content) for cls in self.visitors: diff --git a/src/rapids_pre_commit_hooks/shell/verify_conda_yes.py b/src/rapids_pre_commit_hooks/shell/verify_conda_yes.py index 13e4c5a..e973462 100644 --- a/src/rapids_pre_commit_hooks/shell/verify_conda_yes.py +++ b/src/rapids_pre_commit_hooks/shell/verify_conda_yes.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024-2025, NVIDIA CORPORATION. +# SPDX-FileCopyrightText: Copyright (c) 2024-2026, NVIDIA CORPORATION. # SPDX-License-Identifier: Apache-2.0 from . 
import LintVisitor, ShellMain @@ -56,13 +56,13 @@ def visitcommand(self, _n, parts) -> None: return if not any(arg in command["args"] for arg in command_args): - warning_pos = (parts[0].pos[0], parts[command_index].pos[1]) - insert_pos = (warning_pos[1], warning_pos[1]) + warning_span = (parts[0].pos[0], parts[command_index].pos[1]) + insert_span = (warning_span[1], warning_span[1]) warning = self.add_warning( - warning_pos, f"add {command['args'][0]} argument" + warning_span, f"add {command['args'][0]} argument" ) - warning.add_replacement(insert_pos, f" {command['args'][0]}") + warning.add_replacement(insert_span, f" {command['args'][0]}") def main() -> None: diff --git a/src/rapids_pre_commit_hooks/utils/toml.py b/src/rapids_pre_commit_hooks/utils/toml.py index 25295d0..c8ec3de 100644 --- a/src/rapids_pre_commit_hooks/utils/toml.py +++ b/src/rapids_pre_commit_hooks/utils/toml.py @@ -3,21 +3,22 @@ import copy import uuid +from typing import TYPE_CHECKING import tomlkit +if TYPE_CHECKING: + from ..lint import Span -_LocType = tuple[int, int] - -def find_value_location( +def find_value_span( document: "tomlkit.TOMLDocument", key: tuple[str, ...], *, append: bool, -) -> _LocType: +) -> "Span": """ - Find the exact location of a key in a stringified TOML document. + Find the exact span of a key in a stringified TOML document. Parameters ---------- @@ -30,12 +31,12 @@ def find_value_location( in a pyproject.toml, ``key = ("project", "license",)``. append : bool If ``True``, returns the location where new text will be added. - If ``False``, returns the range of characters to be replaced. + If ``False``, returns the span of characters to be replaced. Returns ------- - loc : tuple[int, int] - Location of the key and its value in the document. + span : tuple[int, int] + Span of the key and its value in the document. e.g., ``(20, 35)`` = "the 20th-35th characters, including newlines" * element 0: number of characters from beginning of the document to beginning of the section indicated by ``key`` @@ -57,8 +58,8 @@ def find_value_location( if append: node.add(str(placeholder), placeholder_toml) value_to_find = f"{placeholder} = {placeholder_repr}" - begin_loc = copied_document.as_string().find(value_to_find) - return begin_loc, begin_loc + begin_pos = copied_document.as_string().find(value_to_find) + return begin_pos, begin_pos # otherwise, if replacing without appending old_value = node[key[0]] @@ -71,6 +72,6 @@ def find_value_location( else (str(placeholder), placeholder_repr) ) node[key[0]] = placeholder_value - begin_loc = copied_document.as_string().find(value_to_find) - end_loc = begin_loc + len(old_value.as_string()) - return begin_loc, end_loc + begin_pos = copied_document.as_string().find(value_to_find) + end_pos = begin_pos + len(old_value.as_string()) + return begin_pos, end_pos diff --git a/tests/rapids_pre_commit_hooks/test_alpha_spec.py b/tests/rapids_pre_commit_hooks/test_alpha_spec.py index efbdb8a..1680c41 100644 --- a/tests/rapids_pre_commit_hooks/test_alpha_spec.py +++ b/tests/rapids_pre_commit_hooks/test_alpha_spec.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024-2025, NVIDIA CORPORATION. +# SPDX-FileCopyrightText: Copyright (c) 2024-2026, NVIDIA CORPORATION. 
# SPDX-License-Identifier: Apache-2.0 import contextlib @@ -720,12 +720,12 @@ def test_check_alpha_spec_integration(tmp_path): start = CONTENT.find(REPLACED) end = start + len(REPLACED) - pos = (start, end) + span = (start, end) expected_linter = lint.Linter( "dependencies.yaml", CONTENT, "verify-alpha-spec" ) expected_linter.add_warning( - pos, "add alpha spec for RAPIDS package cudf" - ).add_replacement(pos, "cudf>=24.04,<24.06,>=0.0.0a0") + span, "add alpha spec for RAPIDS package cudf" + ).add_replacement(span, "cudf>=24.04,<24.06,>=0.0.0a0") assert linter.warnings == expected_linter.warnings diff --git a/tests/rapids_pre_commit_hooks/test_codeowners.py b/tests/rapids_pre_commit_hooks/test_codeowners.py index 1cb5cd6..a508566 100644 --- a/tests/rapids_pre_commit_hooks/test_codeowners.py +++ b/tests/rapids_pre_commit_hooks/test_codeowners.py @@ -1,4 +1,4 @@ -# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION. +# SPDX-FileCopyrightText: Copyright (c) 2025-2026, NVIDIA CORPORATION. # SPDX-License-Identifier: Apache-2.0 from textwrap import dedent @@ -41,18 +41,18 @@ codeowners.CodeownersLine( file=codeowners.FilePattern( filename="filename", - pos=(0, 8), + span=(0, 8), ), owners=[ codeowners.Owner( owner="@owner1", - pos=(9, 16), - pos_with_leading_whitespace=(8, 16), + span=(9, 16), + span_with_leading_whitespace=(8, 16), ), codeowners.Owner( owner="@owner2", - pos=(17, 24), - pos_with_leading_whitespace=(16, 24), + span=(17, 24), + span_with_leading_whitespace=(16, 24), ), ], ), @@ -63,18 +63,18 @@ codeowners.CodeownersLine( file=codeowners.FilePattern( filename="filename", - pos=(1, 9), + span=(1, 9), ), owners=[ codeowners.Owner( owner="@owner1", - pos=(10, 17), - pos_with_leading_whitespace=(9, 17), + span=(10, 17), + span_with_leading_whitespace=(9, 17), ), codeowners.Owner( owner="@owner2", - pos=(18, 25), - pos_with_leading_whitespace=(17, 25), + span=(18, 25), + span_with_leading_whitespace=(17, 25), ), ], ), @@ -85,18 +85,18 @@ codeowners.CodeownersLine( file=codeowners.FilePattern( filename="filename", - pos=(0, 8), + span=(0, 8), ), owners=[ codeowners.Owner( owner="@owner1", - pos=(10, 17), - pos_with_leading_whitespace=(8, 17), + span=(10, 17), + span_with_leading_whitespace=(8, 17), ), codeowners.Owner( owner="@owner2", - pos=(19, 26), - pos_with_leading_whitespace=(17, 26), + span=(19, 26), + span_with_leading_whitespace=(17, 26), ), ], ), @@ -107,18 +107,18 @@ codeowners.CodeownersLine( file=codeowners.FilePattern( filename="file\\ name", - pos=(0, 10), + span=(0, 10), ), owners=[ codeowners.Owner( owner="@owner\\ 1", - pos=(11, 20), - pos_with_leading_whitespace=(10, 20), + span=(11, 20), + span_with_leading_whitespace=(10, 20), ), codeowners.Owner( owner="@owner\\ 2", - pos=(21, 30), - pos_with_leading_whitespace=(20, 30), + span=(21, 30), + span_with_leading_whitespace=(20, 30), ), ], ), @@ -140,7 +140,7 @@ def test_parse_codeowners_line(line, skip, codeowners_line): @pytest.mark.parametrize( - ["line", "pos", "warnings"], + ["line", "span", "warnings"], [ ( "CMakeLists.txt @rapidsai/cudf-cmake-codeowners", @@ -152,15 +152,15 @@ def test_parse_codeowners_line(line, skip, codeowners_line): (0, 14), [ LintWarning( - pos=(0, 14), + span=(0, 14), msg="file 'CMakeLists.txt' has incorrect owners", replacements=[ Replacement( - pos=(14, 28), + span=(14, 28), newtext="", ), Replacement( - pos=(28, 28), + span=(28, 28), newtext=" @rapidsai/cudf-cmake-codeowners", ), ], @@ -172,11 +172,11 @@ def test_parse_codeowners_line(line, skip, codeowners_line): (0, 14), [ 
LintWarning( - pos=(0, 14), + span=(0, 14), msg="file 'CMakeLists.txt' has incorrect owners", replacements=[ Replacement( - pos=(14, 28), + span=(14, 28), newtext="", ), ], @@ -191,7 +191,7 @@ def test_parse_codeowners_line(line, skip, codeowners_line): ], ) @patch_required_codeowners_lines -def test_check_codeowners_line(line, pos, warnings): +def test_check_codeowners_line(line, span, warnings): codeowners_line = codeowners.parse_codeowners_line(line, 0) linter = Linter(".github/CODEOWNERS", line, "verify-codeowners") found_files = [] @@ -200,7 +200,7 @@ def test_check_codeowners_line(line, pos, warnings): ) assert linter.warnings == warnings assert found_files == [ - (line, pos) + (line, span) for line in MOCK_REQUIRED_CODEOWNERS_LINES if line.file == codeowners_line.file.filename ] @@ -227,15 +227,15 @@ def test_check_codeowners_line(line, pos, warnings): ), [ LintWarning( - pos=(1, 15), + span=(1, 15), msg="file 'CMakeLists.txt' has incorrect owners", replacements=[ Replacement( - pos=(15, 29), + span=(15, 29), newtext="", ), Replacement( - pos=(29, 29), + span=(29, 29), newtext=" @rapidsai/cudf-cmake-codeowners", ), ], @@ -250,11 +250,11 @@ def test_check_codeowners_line(line, pos, warnings): ), [ LintWarning( - pos=(0, 0), + span=(0, 0), msg="missing required codeowners", replacements=[ Replacement( - pos=(40, 40), + span=(40, 40), newtext="CMakeLists.txt " "@rapidsai/cudf-cmake-codeowners\n", ), @@ -269,11 +269,11 @@ def test_check_codeowners_line(line, pos, warnings): ), [ LintWarning( - pos=(0, 0), + span=(0, 0), msg="missing required codeowners", replacements=[ Replacement( - pos=(39, 39), + span=(39, 39), newtext="\nCMakeLists.txt " "@rapidsai/cudf-cmake-codeowners\n", ), @@ -290,12 +290,12 @@ def test_check_codeowners_line(line, pos, warnings): ), [ LintWarning( - pos=(1, 15), + span=(1, 15), msg="file 'pyproject.toml' should come after " "'CMakeLists.txt'", notes=[ Note( - pos=(40, 54), + span=(40, 54), msg="file 'CMakeLists.txt' is here", ), ], diff --git a/tests/rapids_pre_commit_hooks/test_copyright.py b/tests/rapids_pre_commit_hooks/test_copyright.py index 1475dcc..a6f6300 100644 --- a/tests/rapids_pre_commit_hooks/test_copyright.py +++ b/tests/rapids_pre_commit_hooks/test_copyright.py @@ -24,7 +24,7 @@ Note, Replacement, ) -from rapids_pre_commit_hooks_test_utils import parse_named_ranges +from rapids_pre_commit_hooks_test_utils import parse_named_spans @pytest.mark.parametrize( @@ -565,7 +565,7 @@ def test_match_copyright(content, start, expected_match): ], ) def test_match_all_copyright(content): - content, r = parse_named_ranges(content, list) + content, spans = parse_named_spans(content, list) lines = Lines(content) assert list( copyright.match_all_copyright( @@ -583,7 +583,7 @@ def test_match_all_copyright(content): **match, }, ) - for match in r + for match in spans ] @@ -656,7 +656,7 @@ def test_compute_prefix(content, filename, index, expected_prefix): @pytest.mark.parametrize( - ["content", "index", "expected_pos"], + ["content", "pos", "expected_span"], [ pytest.param( dedent( @@ -835,12 +835,12 @@ def test_compute_prefix(content, filename, index, expected_prefix): ), ], ) -def test_find_long_form_text(content, index, expected_pos): +def test_find_long_form_text(content, pos, expected_span): assert ( copyright.find_long_form_text( - Lines(content), "file.txt", "Apache-2.0", index + Lines(content), "file.txt", "Apache-2.0", pos ) - == expected_pos + == expected_span ) diff --git a/tests/rapids_pre_commit_hooks/test_hardcoded_version.py 
b/tests/rapids_pre_commit_hooks/test_hardcoded_version.py index 6fb9fd5..f7168c5 100644 --- a/tests/rapids_pre_commit_hooks/test_hardcoded_version.py +++ b/tests/rapids_pre_commit_hooks/test_hardcoded_version.py @@ -8,7 +8,7 @@ from rapids_pre_commit_hooks import hardcoded_version from rapids_pre_commit_hooks.lint import Lines, LintWarning, Linter, Note -from rapids_pre_commit_hooks_test_utils import parse_named_ranges +from rapids_pre_commit_hooks_test_utils import parse_named_spans @pytest.mark.parametrize( @@ -97,12 +97,12 @@ ), ], ) -def test_get_excluded_sections_pyproject_toml(content): - content, ranges = parse_named_ranges(content, root_type=list) +def test_get_excluded_spans_pyproject_toml(content): + content, spans = parse_named_spans(content, root_type=list) linter = Linter("pyproject.toml", content, "verify-hardcoded-version") assert ( - list(hardcoded_version.get_excluded_sections_pyproject_toml(linter)) - == ranges + list(hardcoded_version.get_excluded_spans_pyproject_toml(linter)) + == spans ) @@ -148,10 +148,10 @@ def test_get_excluded_sections_pyproject_toml(content): ), ], ) -def test_get_excluded_sections(filename, content): - content, ranges = parse_named_ranges(content, root_type=list) +def test_get_excluded_spans(filename, content): + content, spans = parse_named_spans(content, root_type=list) linter = Linter(filename, content, "verify-hardcoded-version") - assert list(hardcoded_version.get_excluded_sections(linter)) == ranges + assert list(hardcoded_version.get_excluded_spans(linter)) == spans @pytest.mark.parametrize( @@ -219,12 +219,12 @@ def test_get_excluded_sections(filename, content): ], ) def test_is_deprecation_notice(content, expected_value): - content, ranges = parse_named_ranges(content) - match_range = ranges["match"] + content, spans = parse_named_spans(content) + match_span = spans["match"] lines = Lines(content) match = Mock( - start=Mock(return_value=match_range[0]), - end=Mock(return_value=match_range[1]), + start=Mock(return_value=match_span[0]), + end=Mock(return_value=match_span[1]), ) assert ( hardcoded_version.is_deprecation_notice(lines, match) == expected_value @@ -258,12 +258,12 @@ def test_is_deprecation_notice(content, expected_value): ], ) def test_is_number_array(content, expected_value): - content, ranges = parse_named_ranges(content) - match_range = ranges["match"] + content, spans = parse_named_spans(content) + match_span = spans["match"] lines = Lines(content) match = Mock( - start=Mock(return_value=match_range[0]), - end=Mock(return_value=match_range[1]), + start=Mock(return_value=match_span[0]), + end=Mock(return_value=match_span[1]), ) assert hardcoded_version.is_number_array(lines, match) == expected_value @@ -354,11 +354,11 @@ def test_is_number_array(content, expected_value): ], ) def test_is_version_doc(content, expected_value): - content, ranges = parse_named_ranges(content) - match_range = ranges["match"] + content, spans = parse_named_spans(content) + match_span = spans["match"] lines = Lines(content) - start = Mock(return_value=match_range[0]) - end = Mock(return_value=match_range[1]) + start = Mock(return_value=match_span[0]) + end = Mock(return_value=match_span[1]) match = Mock( start=start, end=end, @@ -592,13 +592,13 @@ def test_skip_heuristics( ], ) def test_find_hardcoded_versions(content, version): - content, r = parse_named_ranges(content, list) + content, spans = parse_named_spans(content, list) assert [ {group: match.span(group) for group in match.groupdict().keys()} for match in 
hardcoded_version.find_hardcoded_versions( content, version ) - ] == [{"patch": (-1, -1), **m} for m in r] + ] == [{"patch": (-1, -1), **s} for s in spans] @pytest.mark.parametrize( @@ -792,7 +792,7 @@ def test_check_hardcoded_version( version_file_read, message, ): - content, r = parse_named_ranges(content, list) + content, spans = parse_named_spans(content, list) linter = Linter(filename, content, "verify-hardcoded-version") with patch( "rapids_pre_commit_hooks.hardcoded_version.read_version_file", @@ -807,11 +807,11 @@ def test_check_hardcoded_version( mock_read_version_file.assert_not_called() assert linter.warnings == [ LintWarning( - m, + s, message, notes=[ Note( - m, + s, "if this is intentional (as part of a docstring or " "deprecation notice), suppress it with " "rapids-pre-commit-hooks: disable-next-line - see " @@ -820,5 +820,5 @@ def test_check_hardcoded_version( ) ], ) - for m in r + for s in spans ] diff --git a/tests/rapids_pre_commit_hooks/test_lint.py b/tests/rapids_pre_commit_hooks/test_lint.py index 0a184d7..4c003bf 100644 --- a/tests/rapids_pre_commit_hooks/test_lint.py +++ b/tests/rapids_pre_commit_hooks/test_lint.py @@ -15,7 +15,7 @@ LintMain, OverlappingReplacementsError, ) -from rapids_pre_commit_hooks_test_utils import parse_named_ranges +from rapids_pre_commit_hooks_test_utils import parse_named_spans class TestLines: @@ -27,7 +27,7 @@ class TestLines: @pytest.mark.parametrize( [ "content", - "expected_pos", + "expected_spans", "expected_lf_count", "expected_crlf_count", "expected_cr_count", @@ -128,17 +128,17 @@ class TestLines: ), ], ) - def test_pos( + def test_spans( self, content, - expected_pos, + expected_spans, expected_lf_count, expected_crlf_count, expected_cr_count, expected_newline_style, ): lines = Lines(content) - assert lines.pos == expected_pos + assert lines.spans == expected_spans assert lines.newline_count == { "\n": expected_lf_count, "\r\n": expected_crlf_count, @@ -217,13 +217,13 @@ def test_fix(self): ) with pytest.raises( OverlappingReplacementsError, - match=r"^Replacement\(pos=\(11, 12\), newtext=''\) overlaps with " - + r"Replacement\(pos=\(11, 12\), newtext='\.'\)$", + match=r"^Replacement\(span=\(11, 12\), newtext=''\) overlaps with " + + r"Replacement\(span=\(11, 12\), newtext='\.'\)$", ): linter.fix() def test_fix_disabled(self): - content, r = parse_named_ranges( + content, spans = parse_named_spans( """\ + # rapids-pre-commit-hooks: disable + Hello world! 
@@ -231,8 +231,8 @@ def test_fix_disabled(self): """ ) linter = Linter("test.txt", content, "test") - linter.add_warning(r["shout"], "don't shout").add_replacement( - r["shout"], "" + linter.add_warning(spans["shout"], "don't shout").add_replacement( + spans["shout"], "" ) assert linter.fix() == content @@ -449,10 +449,10 @@ def test_fix_disabled(self): def test_get_disabled_enabled_boundaries( self, content, warning_name, expected_boundaries ): - content, r = parse_named_ranges(content) + content, spans = parse_named_spans(content) assert Linter.get_disabled_enabled_boundaries( Lines(content), warning_name - ) == list(zip(r, expected_boundaries, strict=True)) + ) == list(zip(spans, expected_boundaries, strict=True)) @pytest.mark.parametrize( ["content", "expected_enabled"], @@ -468,7 +468,7 @@ def test_get_disabled_enabled_boundaries( : ^warning """, True, - id="empty-range-at-start", + id="empty-span-at-start", ), pytest.param( """\ @@ -476,7 +476,7 @@ def test_get_disabled_enabled_boundaries( : ^warning """, True, - id="empty-range-in-middle", + id="empty-span-in-middle", ), pytest.param( """\ @@ -484,7 +484,7 @@ def test_get_disabled_enabled_boundaries( : ^warning """, True, - id="empty-range-at-end", + id="empty-span-at-end", ), pytest.param( """\ @@ -540,13 +540,13 @@ def test_get_disabled_enabled_boundaries( ), ], ) - def test_is_warning_range_enabled(self, content, expected_enabled): - content, r = parse_named_ranges(content) + def test_is_warning_span_enabled(self, content, expected_enabled): + content, spans = parse_named_spans(content) boundaries = Linter.get_disabled_enabled_boundaries( Lines(content), "relevant" ) assert ( - Linter.is_warning_range_enabled(boundaries, r["warning"]) + Linter.is_warning_span_enabled(boundaries, spans["warning"]) == expected_enabled ) @@ -594,7 +594,7 @@ def bracket_file(self, tmp_path): @pytest.fixture def disabled_file_contents(self): - yield parse_named_ranges( + yield parse_named_spans( """\ + # rapids-pre-commit-hooks: disable + Hello! 
diff --git a/tests/rapids_pre_commit_hooks/test_pyproject_license.py b/tests/rapids_pre_commit_hooks/test_pyproject_license.py index 146fea1..675bb17 100644 --- a/tests/rapids_pre_commit_hooks/test_pyproject_license.py +++ b/tests/rapids_pre_commit_hooks/test_pyproject_license.py @@ -8,7 +8,7 @@ from rapids_pre_commit_hooks import pyproject_license from rapids_pre_commit_hooks.lint import LintWarning, Linter, Replacement -from rapids_pre_commit_hooks_test_utils import parse_named_ranges +from rapids_pre_commit_hooks_test_utils import parse_named_spans @pytest.mark.parametrize( @@ -247,7 +247,7 @@ def test_check_pyproject_license( message, replacement_text, ): - content, positions = parse_named_ranges(content) + content, spans = parse_named_spans(content) linter = Linter("pyproject.toml", content, "verify-pyproject-license") pyproject_license.check_pyproject_license(linter, Mock()) @@ -256,11 +256,11 @@ def test_check_pyproject_license( if message is None else [ LintWarning( - positions["warning"], + spans["warning"], message, replacements=[] if replacement_text is None - else [Replacement(positions["replacement"], replacement_text)], + else [Replacement(spans["replacement"], replacement_text)], ) ] ) diff --git a/tests/rapids_pre_commit_hooks/utils/test_toml.py b/tests/rapids_pre_commit_hooks/utils/test_toml.py index 3078717..9319c7c 100644 --- a/tests/rapids_pre_commit_hooks/utils/test_toml.py +++ b/tests/rapids_pre_commit_hooks/utils/test_toml.py @@ -4,8 +4,8 @@ import pytest import tomlkit -from rapids_pre_commit_hooks.utils.toml import find_value_location -from rapids_pre_commit_hooks_test_utils import parse_named_ranges +from rapids_pre_commit_hooks.utils.toml import find_value_span +from rapids_pre_commit_hooks_test_utils import parse_named_spans @pytest.mark.parametrize( @@ -53,8 +53,8 @@ ), ], ) -def test_find_value_location(key, append): - content, positions = parse_named_ranges( +def test_find_value_span(key, append): + content, spans = parse_named_spans( """\ + [table] + key1 = "value" @@ -76,9 +76,9 @@ def test_find_value_location(key, append): """ ) parsed_doc = tomlkit.loads(content) - loc = positions + span = spans for component in key: - loc = loc[component] - loc = loc["_append" if append else "_value"] - assert find_value_location(parsed_doc, key, append=append) == loc + span = span[component] + span = span["_append" if append else "_value"] + assert find_value_span(parsed_doc, key, append=append) == span assert parsed_doc.as_string() == content diff --git a/tests/test_testing_utils.py b/tests/test_testing_utils.py index 695f065..bc133d4 100644 --- a/tests/test_testing_utils.py +++ b/tests/test_testing_utils.py @@ -5,11 +5,11 @@ import pytest -from rapids_pre_commit_hooks_test_utils import ParseError, parse_named_ranges +from rapids_pre_commit_hooks_test_utils import ParseError, parse_named_spans @pytest.mark.parametrize( - ["content", "root_type", "expected_content", "expected_ranges", "context"], + ["content", "root_type", "expected_content", "expected_spans", "context"], [ pytest.param( "+", @@ -41,7 +41,7 @@ "Hello\nworld!\n", {}, contextlib.nullcontext(), - id="no-ranges", + id="no-spans", ), pytest.param( "+ Hello\n+ world!\n", @@ -49,7 +49,7 @@ "Hello\nworld!\n", {}, contextlib.nullcontext(), - id="no-ranges-empty-last-line", + id="no-spans-empty-last-line", ), pytest.param( """\ @@ -60,7 +60,7 @@ "Hello\nworld!\n", {}, contextlib.nullcontext(), - id="no-ranges-empty-range-line", + id="no-spans-empty-span-line", ), pytest.param( """\ @@ -71,7 +71,7 @@ 
"Hello\nworld!", {}, contextlib.nullcontext(), - id="no-ranges-no-newline", + id="no-spans-no-newline", ), pytest.param( """\ @@ -82,33 +82,33 @@ "Hello world!", {}, contextlib.nullcontext(), - id="no-ranges-multiple-no-newlines", + id="no-spans-multiple-no-newlines", ), pytest.param( """\ + Hello - : ^group1 + : ^span1 """, dict, "Hello\n", { - "group1": (1, 1), + "span1": (1, 1), }, contextlib.nullcontext(), - id="single-empty-group", + id="single-empty-span", ), pytest.param( """\ > Hello - : ^group1 + : ^span1 """, dict, "Hello", { - "group1": (1, 1), + "span1": (1, 1), }, contextlib.nullcontext(), - id="single-empty-group-no-newline", + id="single-empty-span-no-newline", ), pytest.param( """\ @@ -121,22 +121,22 @@ "end": (6, 6), }, contextlib.nullcontext(), - id="single-empty-group-at-end", + id="single-empty-span-at-end", ), pytest.param( """\ + Hello - : ^group1 - : ^group2 + : ^span1 + : ^span2 """, dict, "Hello\n", { - "group1": (1, 1), - "group2": (2, 2), + "span1": (1, 1), + "span2": (2, 2), }, contextlib.nullcontext(), - id="multiple-empty-groups", + id="multiple-empty-spans", ), pytest.param( """\ @@ -150,46 +150,46 @@ "b": (4, 4), }, contextlib.nullcontext(), - id="multiple-empty-groups-one-line", + id="multiple-empty-spans-one-line", ), pytest.param( """\ + Hello - : ~~group1 + : ~~span1 """, dict, "Hello\n", { - "group1": (1, 3), + "span1": (1, 3), }, contextlib.nullcontext(), - id="single-nonempty-group", + id="single-nonempty-span", ), pytest.param( """\ + Hello - : >large_group + : >large_span + world + again - : !large_group + : !large_span """, dict, "Hello\nworld\nagain\n", { - "large_group": (1, 12), + "large_span": (1, 12), }, contextlib.nullcontext(), - id="large-group", + id="large-span", ), pytest.param( """\ + Hello - : ~~group1 # This is the first group + : ~~span1 # This is the first span """, dict, "Hello\n", { - "group1": (1, 3), + "span1": (1, 3), }, contextlib.nullcontext(), id="comment", @@ -197,15 +197,15 @@ pytest.param( """\ + Hello - : ~g#~g + : ~s#~s """, dict, "Hello\n", { - "g": (0, 1), + "s": (0, 1), }, contextlib.nullcontext(), - id="comment-with-range", + id="comment-with-span", ), pytest.param( """\ @@ -221,89 +221,89 @@ pytest.param( """\ + Hello - : ~~~~group1 + : ~~~~span1 """, dict, "Hello\n", { - "group1": (1, 5), + "span1": (1, 5), }, contextlib.nullcontext(), - id="single-nonempty-group-to-end-of-line", + id="single-nonempty-span-to-end-of-line", ), pytest.param( """\ + Hello - : ~~~~~group1 + : ~~~~~span1 """, dict, "Hello\n", { - "group1": (1, 6), + "span1": (1, 6), }, contextlib.nullcontext(), - id="single-line-ending-group", + id="single-line-ending-span", ), pytest.param( """\ + Hello - : ~~~~~group1 + : ~~~~~span1 + world! - : ~~group1 + : ~~span1 """, dict, "Hello\nworld!\n", { - "group1": (1, 8), + "span1": (1, 8), }, contextlib.nullcontext(), - id="single-multiline-group", + id="single-multiline-span", ), pytest.param( """\ + Hello - : ~~~~~group1 - : ~~~group2 + : ~~~~~span1 + : ~~~span2 + world! 
- : ~~group1 - : ~group2 + : ~~span1 + : ~span2 """, dict, "Hello\nworld!\n", { - "group1": (1, 8), - "group2": (3, 7), + "span1": (1, 8), + "span2": (3, 7), }, contextlib.nullcontext(), - id="multiple-multiline-groups", + id="multiple-multiline-spans", ), pytest.param( """\ + Hello - : ~~group1 - : ~~group1 + : ~~span1 + : ~~span1 """, dict, "Hello\n", { - "group1": (0, 4), + "span1": (0, 4), }, contextlib.nullcontext(), - id="joined-group-forward", + id="joined-span-forward", ), pytest.param( """\ + Hello - : ~~group1 - : ~~group1 + : ~~span1 + : ~~span1 """, dict, "Hello\n", { - "group1": (0, 4), + "span1": (0, 4), }, contextlib.nullcontext(), - id="joined-group-reverse", + id="joined-span-reverse", ), pytest.param( """\ @@ -438,45 +438,45 @@ pytest.param( """\ + Hello - : ~~~~group1 + : ~~~~span1 + world! - : ~group1 + : ~span1 """, dict, None, None, pytest.raises(ParseError), - id="broken-multiline-group-first", + id="broken-multiline-span-first", ), pytest.param( """\ + Hello - : ~~~~~group1 + : ~~~~~span1 + world! - : ~group1 + : ~span1 """, dict, None, None, pytest.raises(ParseError), - id="broken-multiline-group-second", + id="broken-multiline-span-second", ), pytest.param( """\ + Hello - : ~~g - : ~~g + : ~~s + : ~~s """, dict, None, None, pytest.raises(ParseError), - id="overlapping-group", + id="overlapping-span", ), pytest.param( """\ + Hello - : ~~~~~~group1 + : ~~~~~~span1 """, dict, None, @@ -487,7 +487,7 @@ pytest.param( """\ + Hello - : a ~group1 + : a ~span1 """, dict, None, @@ -498,7 +498,7 @@ pytest.param( """\ + Hello - : ~group1 a + : ~span1 a """, dict, None, @@ -509,7 +509,7 @@ pytest.param( """\ + Hello - @ ~group1 + @ ~span1 """, dict, None, @@ -547,7 +547,7 @@ None, None, pytest.raises(ParseError), - id="overwrite-range-with-dict", + id="overwrite-span-with-dict", ), pytest.param( """\ @@ -559,7 +559,7 @@ None, None, pytest.raises(ParseError), - id="overwrite-dict-with-range", + id="overwrite-dict-with-span", ), pytest.param( """\ @@ -615,7 +615,7 @@ None, None, pytest.raises(ParseError), - id="group-on-no-content", + id="span-on-no-content", ), pytest.param( """\ @@ -631,46 +631,46 @@ pytest.param( """\ + Hello - : >g - : >g - : !g + : >s + : >s + : !s """, dict, None, None, pytest.raises(ParseError), - id="duplicate-large-group", + id="duplicate-large-span", ), pytest.param( """\ + Hello - : >g - : !g - : !g + : >s + : !s + : !s """, dict, None, None, pytest.raises(ParseError), - id="double-terminate-large-group", + id="double-terminate-large-span", ), pytest.param( """\ + Hello - : >g + : >s """, dict, None, None, pytest.raises(ParseError), - id="unterminated-large-group", + id="unterminated-large-span", ), ], ) -def test_parse_named_ranges( - content, root_type, expected_content, expected_ranges, context +def test_parse_named_spans( + content, root_type, expected_content, expected_spans, context ): with context: - content, named_ranges = parse_named_ranges(content, root_type) + content, spans = parse_named_spans(content, root_type) assert content == expected_content - assert named_ranges == expected_ranges + assert spans == expected_spans diff --git a/tests/utils/rapids_pre_commit_hooks_test_utils.py b/tests/utils/rapids_pre_commit_hooks_test_utils.py index 68df699..4431984 100644 --- a/tests/utils/rapids_pre_commit_hooks_test_utils.py +++ b/tests/utils/rapids_pre_commit_hooks_test_utils.py @@ -11,15 +11,14 @@ if TYPE_CHECKING: from typing import TypeGuard - NamedRanges = ( - dict[str, "tuple[int, int] | NamedRanges"] - | list["tuple[int, int] | 
NamedRanges"] - ) - _NamedRanges = dict[str | int, "tuple[int, int] | _NamedRanges"] + from rapids_pre_commit_hooks.lint import Span + + NamedSpans = dict[str, "Span | NamedSpans"] | list["Span | NamedSpans"] + _NamedSpans = dict[str | int, "Span | _NamedSpans"] -_RANGE_LINE_RE: re.Pattern = re.compile( - r"(?P\^|>|!|~+)" +_SPAN_LINE_RE: re.Pattern = re.compile( + r"(?P\^|>|!|~+)" r"(?P" r"(?:[0-9]+|[a-zA-Z_][a-zA-Z0-9_]*)" r"(?:\.(?:[0-9]+|[a-zA-Z_][a-zA-Z0-9_]*))*" @@ -38,21 +37,21 @@ def _parse_path_item(item: str) -> str | int: return item -def parse_named_ranges( +def parse_named_spans( content: str, root_type: type | None = None -) -> "tuple[str, NamedRanges | None]": +) -> "tuple[str, NamedSpans | None]": assert root_type is dict or root_type is list or root_type is None lines = Lines(dedent(content)) content = "" - named_ranges: "_NamedRanges | None" = None + named_spans: "_NamedSpans | None" = None in_progress_large_groups: dict[tuple[int | str, ...], int] = {} - def get_last_collection(path: tuple[int | str, ...]) -> "_NamedRanges": - nonlocal named_ranges - last_collection: "_NamedRanges | None" = named_ranges + def get_last_collection(path: tuple[int | str, ...]) -> "_NamedSpans": + nonlocal named_spans + last_collection: "_NamedSpans | None" = named_spans for item in path[:-1]: if last_collection is None: - last_collection = named_ranges = {} + last_collection = named_spans = {} try: next_collection = last_collection[item] except KeyError: @@ -60,8 +59,8 @@ def get_last_collection(path: tuple[int | str, ...]) -> "_NamedRanges": if not isinstance(next_collection, dict): raise ParseError last_collection = next_collection - if named_ranges is None: - named_ranges = last_collection = {} + if named_spans is None: + named_spans = last_collection = {} else: assert last_collection is not None return last_collection @@ -69,10 +68,10 @@ def get_last_collection(path: tuple[int | str, ...]) -> "_NamedRanges": start_of_last_line = 0 end_of_last_line = 0 newline = False - for this_pos, next_pos in itertools.pairwise( - itertools.chain(lines.pos, [(len(lines.content), -1)]) + for this_span, next_span in itertools.pairwise( + itertools.chain(lines.spans, [(len(lines.content), -1)]) ): - line = lines.content[this_pos[0] : this_pos[1]] + line = lines.content[this_span[0] : this_span[1]] first_two_chars = line[0:2] if first_two_chars in {"+ ", "+"}: @@ -80,20 +79,20 @@ def get_last_collection(path: tuple[int | str, ...]) -> "_NamedRanges": start_of_last_line = len(content) end_of_last_line = ( start_of_last_line - + this_pos[1] - - this_pos[0] + + this_span[1] + - this_span[0] - len(first_two_chars) ) content += lines.content[ - this_pos[0] + len(first_two_chars) : next_pos[0] + this_span[0] + len(first_two_chars) : next_span[0] ] elif first_two_chars in {"> ", ">"}: newline = False start_of_last_line = len(content) end_of_last_line = ( start_of_last_line - + this_pos[1] - - this_pos[0] + + this_span[1] + - this_span[0] - len(first_two_chars) ) content += line[len(first_two_chars) :] @@ -102,7 +101,7 @@ def get_last_collection(path: tuple[int | str, ...]) -> "_NamedRanges": if (pound := directive_line.find("#")) >= 0: directive_line = directive_line[:pound] end = 0 - for match in _RANGE_LINE_RE.finditer(directive_line): + for match in _SPAN_LINE_RE.finditer(directive_line): if any( filter( lambda c: c != " ", directive_line[end : match.start()] @@ -115,55 +114,55 @@ def get_last_collection(path: tuple[int | str, ...]) -> "_NamedRanges": map(_parse_path_item, match.group("path").split(".")) ) - 
if match.group("range") == ">": + if match.group("span") == ">": if path in in_progress_large_groups: raise ParseError in_progress_large_groups[path] = ( - start_of_last_line + match.start("range") + start_of_last_line + match.start("span") ) else: - range_start = start_of_last_line + match.start("range") - if match.group("range") == "^": - range_end = range_start - elif match.group("range") == "!": - range_end = range_start + span_start = start_of_last_line + match.start("span") + if match.group("span") == "^": + span_end = span_start + elif match.group("span") == "!": + span_end = span_start try: - range_start = in_progress_large_groups.pop(path) + span_start = in_progress_large_groups.pop(path) except KeyError as e: raise ParseError from e elif ( - match.end("range") + match.end("span") == end_of_last_line - start_of_last_line + 1 ): if not newline: raise ParseError - range_end = len(content) + span_end = len(content) else: - range_end = start_of_last_line + match.end("range") + span_end = start_of_last_line + match.end("span") - range = (range_start, range_end) + span = (span_start, span_end) - if max(*range) > len(content): + if max(*span) > len(content): raise ParseError last_collection = get_last_collection(path) try: - existing_range = last_collection[path[-1]] + existing_span = last_collection[path[-1]] except KeyError: - last_collection[path[-1]] = range + last_collection[path[-1]] = span else: - if not isinstance(existing_range, tuple): + if not isinstance(existing_span, tuple): raise ParseError - if range[0] == existing_range[1]: + if span[0] == existing_span[1]: last_collection[path[-1]] = ( - existing_range[0], - range[1], + existing_span[0], + span[1], ) - elif range[1] == existing_range[0]: + elif span[1] == existing_span[0]: last_collection[path[-1]] = ( - range[0], - existing_range[1], + span[0], + existing_span[1], ) else: raise ParseError @@ -175,23 +174,23 @@ def get_last_collection(path: tuple[int | str, ...]) -> "_NamedRanges": ) ): raise ParseError - elif line != "" or next_pos[1] >= 0: + elif line != "" or next_span[1] >= 0: raise ParseError if any(in_progress_large_groups): raise ParseError def is_list_filled( - collection: "list[None | tuple[int, int] | NamedRanges]", - ) -> "TypeGuard[list[tuple[int, int] | NamedRanges]]": + collection: "list[None | Span | NamedSpans]", + ) -> "TypeGuard[list[Span | NamedSpans]]": return all(map(lambda i: i is not None, collection)) - def postprocess(named_ranges: "_NamedRanges") -> "NamedRanges": + def postprocess(named_spans: "_NamedSpans") -> "NamedSpans": collection: """ - dict[str, tuple[int, int] | NamedRanges] | - list[None | tuple[int, int] | NamedRanges] | None + dict[str, "Span | NamedSpans"] | + list[None | "Span | NamedSpans"] | None """ = None - for k, v in named_ranges.items(): + for k, v in named_spans.items(): if isinstance(k, str): if collection is None: collection = {} @@ -215,8 +214,8 @@ def postprocess(named_ranges: "_NamedRanges") -> "NamedRanges": postprocessed = ( (None if root_type is None else root_type()) - if named_ranges is None - else postprocess(named_ranges) + if named_spans is None + else postprocess(named_spans) ) if root_type is not None and not isinstance(postprocessed, root_type): raise ParseError