Compare commits

2 commits: dhruv/synt ... dhruv/form

| Author | SHA1 | Date |
|---|---|---|
|  | ce4d4ae6ac |  |
|  | 128414cd95 |  |

80  .github/workflows/sync_typeshed.yaml (vendored)

@@ -1,80 +0,0 @@
-name: Sync typeshed
-
-on:
-  workflow_dispatch:
-  schedule:
-    # Run on the 1st and the 15th of every month:
-    - cron: "0 0 1,15 * *"
-
-env:
-  FORCE_COLOR: 1
-  GH_TOKEN: ${{ github.token }}
-
-jobs:
-  sync:
-    name: Sync typeshed
-    runs-on: ubuntu-latest
-    timeout-minutes: 20
-    # Don't run the cron job on forks:
-    if: ${{ github.repository == 'astral-sh/ruff' || github.event_name != 'schedule' }}
-    permissions:
-      contents: write
-      pull-requests: write
-    steps:
-      - uses: actions/checkout@v4
-        name: Checkout Ruff
-        with:
-          path: ruff
-      - uses: actions/checkout@v4
-        name: Checkout typeshed
-        with:
-          repository: python/typeshed
-          path: typeshed
-      - name: Setup git
-        run: |
-          git config --global user.name typeshedbot
-          git config --global user.email '<>'
-      - name: Sync typeshed
-        id: sync
-        run: |
-          rm -rf ruff/crates/red_knot/vendor/typeshed
-          mkdir ruff/crates/red_knot/vendor/typeshed
-          cp typeshed/README.md ruff/crates/red_knot/vendor/typeshed
-          cp typeshed/LICENSE ruff/crates/red_knot/vendor/typeshed
-          cp -r typeshed/stdlib ruff/crates/red_knot/vendor/typeshed/stdlib
-          rm -rf ruff/crates/red_knot/vendor/typeshed/stdlib/@tests
-          git -C typeshed rev-parse HEAD > ruff/crates/red_knot/vendor/typeshed/source_commit.txt
-      - name: Commit the changes
-        id: commit
-        if: ${{ steps.sync.outcome == 'success' }}
-        run: |
-          cd ruff
-          git checkout -b typeshedbot/sync-typeshed
-          git add .
-          git diff --staged --quiet || git commit -m "Sync typeshed. Source commit: https://github.com/python/typeshed/commit/$(git -C ../typeshed rev-parse HEAD)"
-      - name: Create a PR
-        if: ${{ steps.sync.outcome == 'success' && steps.commit.outcome == 'success' }}
-        run: |
-          cd ruff
-          git push --force origin typeshedbot/sync-typeshed
-          gh pr list --repo $GITHUB_REPOSITORY --head typeshedbot/sync-typeshed --json id --jq length | grep 1 && exit 0 # exit if there is existing pr
-          gh pr create --title "Sync vendored typeshed stubs" --body "Close and reopen this PR to trigger CI" --label "internal"
-
-  create-issue-on-failure:
-    name: Create an issue if the typeshed sync failed
-    runs-on: ubuntu-latest
-    needs: [sync]
-    if: ${{ github.repository == 'astral-sh/ruff' && always() && github.event_name == 'schedule' && needs.sync.result == 'failure' }}
-    permissions:
-      issues: write
-    steps:
-      - uses: actions/github-script@v7
-        with:
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-          script: |
-            await github.rest.issues.create({
-              owner: "astral-sh",
-              repo: "ruff",
-              title: `Automated typeshed sync failed on ${new Date().toDateString()}`,
-              body: "Runs are listed here: https://github.com/astral-sh/ruff/actions/workflows/sync_typeshed.yaml",
-            })

@@ -637,11 +637,11 @@ Otherwise, follow the instructions from the linux section.
 `cargo dev` is a shortcut for `cargo run --package ruff_dev --bin ruff_dev`. You can run some useful
 utils with it:
 
-- `cargo dev print-ast <file>`: Print the AST of a python file using Ruff's
-  [Python parser](https://github.com/astral-sh/ruff/tree/main/crates/ruff_python_parser).
-  For `if True: pass # comment`, you can see the syntax tree, the byte offsets for start and
-  stop of each node and also how the `:` token, the comment and whitespace are not represented
-  anymore:
+- `cargo dev print-ast <file>`: Print the AST of a python file using the
+  [RustPython parser](https://github.com/astral-sh/ruff/tree/main/crates/ruff_python_parser) that is
+  mainly used in Ruff. For `if True: pass # comment`, you can see the syntax tree, the byte offsets
+  for start and stop of each node and also how the `:` token, the comment and whitespace are not
+  represented anymore:
 
 ```text
 [
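
The example output block above is truncated in this compare view. For reference, the command that both versions of this bullet document can be tried directly; a minimal sketch (`scratch.py` is a hypothetical file name, not part of the diff):

```shell
# Write the snippet from the docs to a scratch file, then dump its AST.
echo 'if True: pass # comment' > scratch.py
cargo dev print-ast scratch.py
```
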
22  Cargo.toml

@@ -12,28 +12,6 @@ authors = ["Charlie Marsh <charlie.r.marsh@gmail.com>"]
 license = "MIT"
 
 [workspace.dependencies]
-ruff = { path = "crates/ruff" }
-ruff_cache = { path = "crates/ruff_cache" }
-ruff_diagnostics = { path = "crates/ruff_diagnostics" }
-ruff_formatter = { path = "crates/ruff_formatter" }
-ruff_index = { path = "crates/ruff_index" }
-ruff_linter = { path = "crates/ruff_linter" }
-ruff_macros = { path = "crates/ruff_macros" }
-ruff_notebook = { path = "crates/ruff_notebook" }
-ruff_python_ast = { path = "crates/ruff_python_ast" }
-ruff_python_codegen = { path = "crates/ruff_python_codegen" }
-ruff_python_formatter = { path = "crates/ruff_python_formatter" }
-ruff_python_index = { path = "crates/ruff_python_index" }
-ruff_python_literal = { path = "crates/ruff_python_literal" }
-ruff_python_parser = { path = "crates/ruff_python_parser" }
-ruff_python_semantic = { path = "crates/ruff_python_semantic" }
-ruff_python_stdlib = { path = "crates/ruff_python_stdlib" }
-ruff_python_trivia = { path = "crates/ruff_python_trivia" }
-ruff_server = { path = "crates/ruff_server" }
-ruff_source_file = { path = "crates/ruff_source_file" }
-ruff_text_size = { path = "crates/ruff_text_size" }
-ruff_workspace = { path = "crates/ruff_workspace" }
-
 aho-corasick = { version = "1.1.3" }
 annotate-snippets = { version = "0.9.2", features = ["color"] }
 anyhow = { version = "1.0.80" }

@@ -266,11 +266,6 @@ The remaining configuration options can be provided through a catch-all `--config`
 ruff check --config "lint.per-file-ignores = {'some_file.py' = ['F841']}"
 ```
 
-To opt in to the latest lint rules, formatter style changes, interface updates, and more, enable
-[preview mode](https://docs.astral.sh/ruff/rules/) by setting `preview = true` in your configuration
-file or passing `--preview` on the command line. Preview mode enables a collection of unstable
-features that may change prior to stabilization.
-
 See `ruff help` for more on Ruff's top-level commands, or `ruff help check` and `ruff help format`
 for more on the linting and formatting commands, respectively.
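
For reference, the paragraph removed in this hunk documents preview mode; enabling it looks like this (a sketch based only on the `--preview` flag and the `preview = true` setting named in the removed text):

```shell
# One-off run with preview features enabled:
ruff check --preview .

# Or persist it in pyproject.toml:
#   [tool.ruff]
#   preview = true
```
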
@@ -12,11 +12,11 @@ license.workspace = true
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-ruff_python_parser = { workspace = true }
-ruff_python_ast = { workspace = true }
-ruff_text_size = { workspace = true }
-ruff_index = { workspace = true }
-ruff_notebook = { workspace = true }
+ruff_python_parser = { path = "../ruff_python_parser" }
+ruff_python_ast = { path = "../ruff_python_ast" }
+ruff_text_size = { path = "../ruff_text_size" }
+ruff_index = { path = "../ruff_index" }
+ruff_notebook = { path = "../ruff_notebook" }
 
 anyhow = { workspace = true }
 bitflags = { workspace = true }

@@ -6,4 +6,13 @@ The Red Knot crate contains code working towards multifile analysis, type infere
 
 Red Knot vendors [typeshed](https://github.com/python/typeshed)'s stubs for the standard library. The vendored stubs can be found in `crates/red_knot/vendor/typeshed`. The file `crates/red_knot/vendor/typeshed/source_commit.txt` tells you the typeshed commit that our vendored stdlib stubs currently correspond to.
 
-The typeshed stubs are updated every two weeks via an automated PR using the `sync_typeshed.yaml` workflow in the `.github/workflows` directory. This workflow can also be triggered at any time via [workflow dispatch](https://docs.github.com/en/actions/using-workflows/manually-running-a-workflow#running-a-workflow).
+Updating the vendored stubs is currently done manually. On a Unix machine, follow the following steps (if you have a typeshed clone in a `typeshed` directory, and a Ruff clone in a `ruff` directory):
+
+```shell
+rm -rf ruff/crates/red_knot/vendor/typeshed
+mkdir ruff/crates/red_knot/vendor/typeshed
+cp typeshed/README.md ruff/crates/red_knot/vendor/typeshed
+cp typeshed/LICENSE ruff/crates/red_knot/vendor/typeshed
+cp -r typeshed/stdlib ruff/crates/red_knot/vendor/typeshed/stdlib
+git -C typeshed rev-parse HEAD > ruff/crates/red_knot/vendor/typeshed/source_commit.txt
+```
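
The sentence removed above points to triggering the sync via workflow dispatch; the same dispatch can also be done from the command line — a sketch assuming an authenticated GitHub CLI (this invocation is not part of the diff):

```shell
# Equivalent to pressing "Run workflow" in the Actions UI:
gh workflow run sync_typeshed.yaml --repo astral-sh/ruff
```
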
@@ -1 +1 @@
-a9d7e861f7a46ae7acd56569326adef302e10f29
+2d33fe212221a05661c0db5215a91cf3d7b7f072

@@ -1,18 +0,0 @@
-# Implicit protocols used in importlib.
-# We intentionally omit deprecated and optional methods.
-
-from collections.abc import Sequence
-from importlib.machinery import ModuleSpec
-from types import ModuleType
-from typing import Protocol
-
-__all__ = ["LoaderProtocol", "MetaPathFinderProtocol", "PathEntryFinderProtocol"]
-
-class LoaderProtocol(Protocol):
-    def load_module(self, fullname: str, /) -> ModuleType: ...
-
-class MetaPathFinderProtocol(Protocol):
-    def find_spec(self, fullname: str, path: Sequence[str] | None, target: ModuleType | None = ..., /) -> ModuleSpec | None: ...
-
-class PathEntryFinderProtocol(Protocol):
-    def find_spec(self, fullname: str, target: ModuleType | None = ..., /) -> ModuleSpec | None: ...

@@ -31,7 +31,7 @@ from _typeshed import (
 )
 from collections.abc import Awaitable, Callable, Iterable, Iterator, MutableSet, Reversible, Set as AbstractSet, Sized
 from io import BufferedRandom, BufferedReader, BufferedWriter, FileIO, TextIOWrapper
-from types import CellType, CodeType, TracebackType
+from types import CodeType, TracebackType, _Cell
 
 # mypy crashes if any of {ByteString, Sequence, MutableSequence, Mapping, MutableMapping} are imported from collections.abc in builtins.pyi
 from typing import ( # noqa: Y022
@@ -951,7 +951,7 @@ class tuple(Sequence[_T_co]):
 class function:
     # Make sure this class definition stays roughly in line with `types.FunctionType`
     @property
-    def __closure__(self) -> tuple[CellType, ...] | None: ...
+    def __closure__(self) -> tuple[_Cell, ...] | None: ...
     __code__: CodeType
     __defaults__: tuple[Any, ...] | None
     __dict__: dict[str, Any]
@@ -1333,7 +1333,7 @@ if sys.version_info >= (3, 11):
         locals: Mapping[str, object] | None = None,
         /,
         *,
-        closure: tuple[CellType, ...] | None = None,
+        closure: tuple[_Cell, ...] | None = None,
     ) -> None: ...
 
 else:
@@ -1794,7 +1794,7 @@ def __import__(
     fromlist: Sequence[str] = (),
     level: int = 0,
 ) -> types.ModuleType: ...
-def __build_class__(func: Callable[[], CellType | Any], name: str, /, *bases: Any, metaclass: Any = ..., **kwds: Any) -> Any: ...
+def __build_class__(func: Callable[[], _Cell | Any], name: str, /, *bases: Any, metaclass: Any = ..., **kwds: Any) -> Any: ...
 
 if sys.version_info >= (3, 10):
     from types import EllipsisType

@@ -1,5 +1,3 @@
-import sys
-from _typeshed import StrOrBytesPath
 from collections.abc import Iterator, MutableMapping
 from types import TracebackType
 from typing import Literal
@@ -93,10 +91,5 @@ class _error(Exception): ...
 
 error: tuple[type[_error], type[OSError]]
 
-if sys.version_info >= (3, 11):
-    def whichdb(filename: StrOrBytesPath) -> str | None: ...
-    def open(file: StrOrBytesPath, flag: _TFlags = "r", mode: int = 0o666) -> _Database: ...
-
-else:
-    def whichdb(filename: str) -> str | None: ...
-    def open(file: str, flag: _TFlags = "r", mode: int = 0o666) -> _Database: ...
+def whichdb(filename: str) -> str | None: ...
+def open(file: str, flag: _TFlags = "r", mode: int = 0o666) -> _Database: ...

@@ -1,5 +1,3 @@
-import sys
-from _typeshed import StrOrBytesPath
 from collections.abc import Iterator, MutableMapping
 from types import TracebackType
 from typing_extensions import Self, TypeAlias
@@ -30,8 +28,4 @@ class _Database(MutableMapping[_KeyType, bytes]):
         self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
     ) -> None: ...
 
-if sys.version_info >= (3, 11):
-    def open(file: StrOrBytesPath, flag: str = "c", mode: int = 0o666) -> _Database: ...
-
-else:
-    def open(file: str, flag: str = "c", mode: int = 0o666) -> _Database: ...
+def open(file: str, flag: str = "c", mode: int = 0o666) -> _Database: ...

@@ -1,5 +1,5 @@
 import sys
-from _typeshed import ReadOnlyBuffer, StrOrBytesPath
+from _typeshed import ReadOnlyBuffer
 from types import TracebackType
 from typing import TypeVar, overload
 from typing_extensions import Self, TypeAlias
@@ -38,7 +38,4 @@ if sys.platform != "win32":
         __new__: None  # type: ignore[assignment]
         __init__: None  # type: ignore[assignment]
 
-    if sys.version_info >= (3, 11):
-        def open(filename: StrOrBytesPath, flags: str = "r", mode: int = 0o666, /) -> _gdbm: ...
-    else:
-        def open(filename: str, flags: str = "r", mode: int = 0o666, /) -> _gdbm: ...
+    def open(filename: str, flags: str = "r", mode: int = 0o666, /) -> _gdbm: ...

@@ -1,5 +1,5 @@
 import sys
-from _typeshed import ReadOnlyBuffer, StrOrBytesPath
+from _typeshed import ReadOnlyBuffer
 from types import TracebackType
 from typing import TypeVar, overload
 from typing_extensions import Self, TypeAlias
@@ -34,7 +34,4 @@ if sys.platform != "win32":
         __new__: None  # type: ignore[assignment]
         __init__: None  # type: ignore[assignment]
 
-    if sys.version_info >= (3, 11):
-        def open(filename: StrOrBytesPath, flags: str = "r", mode: int = 0o666, /) -> _dbm: ...
-    else:
-        def open(filename: str, flags: str = "r", mode: int = 0o666, /) -> _dbm: ...
+    def open(filename: str, flags: str = "r", mode: int = 0o666, /) -> _dbm: ...

@@ -64,7 +64,7 @@ class SourceLoader(ResourceLoader, ExecutionLoader, metaclass=ABCMeta):
 
 # The base classes differ starting in 3.10:
 if sys.version_info >= (3, 10):
-    # Please keep in sync with _typeshed.importlib.MetaPathFinderProtocol
+    # Please keep in sync with sys._MetaPathFinder
     class MetaPathFinder(metaclass=ABCMeta):
         if sys.version_info < (3, 12):
             def find_module(self, fullname: str, path: Sequence[str] | None) -> Loader | None: ...
@@ -85,7 +85,7 @@ if sys.version_info >= (3, 10):
         def find_spec(self, fullname: str, target: types.ModuleType | None = ...) -> ModuleSpec | None: ...
 
 else:
-    # Please keep in sync with _typeshed.importlib.MetaPathFinderProtocol
+    # Please keep in sync with sys._MetaPathFinder
     class MetaPathFinder(Finder):
         def find_module(self, fullname: str, path: Sequence[str] | None) -> Loader | None: ...
         def invalidate_caches(self) -> None: ...

@@ -3,7 +3,6 @@ import importlib.machinery
 import sys
 import types
 from _typeshed import ReadableBuffer, StrOrBytesPath
-from _typeshed.importlib import LoaderProtocol
 from collections.abc import Callable
 from typing import Any
 from typing_extensions import ParamSpec
@@ -24,13 +23,13 @@ def source_from_cache(path: str) -> str: ...
 def decode_source(source_bytes: ReadableBuffer) -> str: ...
 def find_spec(name: str, package: str | None = None) -> importlib.machinery.ModuleSpec | None: ...
 def spec_from_loader(
-    name: str, loader: LoaderProtocol | None, *, origin: str | None = None, is_package: bool | None = None
+    name: str, loader: importlib.abc.Loader | None, *, origin: str | None = None, is_package: bool | None = None
 ) -> importlib.machinery.ModuleSpec | None: ...
 def spec_from_file_location(
     name: str,
     location: StrOrBytesPath | None = None,
     *,
-    loader: LoaderProtocol | None = None,
+    loader: importlib.abc.Loader | None = None,
     submodule_search_locations: list[str] | None = ...,
 ) -> importlib.machinery.ModuleSpec | None: ...
 def module_from_spec(spec: importlib.machinery.ModuleSpec) -> types.ModuleType: ...

@@ -1,7 +1,7 @@
 import sys
 from _typeshed import SupportsRead
-from _typeshed.importlib import LoaderProtocol, MetaPathFinderProtocol, PathEntryFinderProtocol
 from collections.abc import Callable, Iterable, Iterator
+from importlib.abc import Loader, MetaPathFinder, PathEntryFinder
 from typing import IO, Any, NamedTuple, TypeVar
 from typing_extensions import deprecated
 
@@ -23,7 +23,7 @@ if sys.version_info < (3, 12):
 _PathT = TypeVar("_PathT", bound=Iterable[str])
 
 class ModuleInfo(NamedTuple):
-    module_finder: MetaPathFinderProtocol | PathEntryFinderProtocol
+    module_finder: MetaPathFinder | PathEntryFinder
     name: str
     ispkg: bool
 
@@ -37,11 +37,11 @@ if sys.version_info < (3, 12):
         def __init__(self, fullname: str, file: IO[str], filename: str, etc: tuple[str, str, int]) -> None: ...
 
 @deprecated("Use importlib.util.find_spec() instead. Will be removed in Python 3.14.")
-def find_loader(fullname: str) -> LoaderProtocol | None: ...
-def get_importer(path_item: str) -> PathEntryFinderProtocol | None: ...
+def find_loader(fullname: str) -> Loader | None: ...
+def get_importer(path_item: str) -> PathEntryFinder | None: ...
 @deprecated("Use importlib.util.find_spec() instead. Will be removed in Python 3.14.")
-def get_loader(module_or_name: str) -> LoaderProtocol | None: ...
-def iter_importers(fullname: str = "") -> Iterator[MetaPathFinderProtocol | PathEntryFinderProtocol]: ...
+def get_loader(module_or_name: str) -> Loader | None: ...
+def iter_importers(fullname: str = "") -> Iterator[MetaPathFinder | PathEntryFinder]: ...
 def iter_modules(path: Iterable[str] | None = None, prefix: str = "") -> Iterator[ModuleInfo]: ...
 def read_code(stream: SupportsRead[bytes]) -> Any: ...  # undocumented
 def walk_packages(

@@ -1,5 +1,3 @@
-import sys
-from _typeshed import StrOrBytesPath
 from collections.abc import Iterator, MutableMapping
 from dbm import _TFlags
 from types import TracebackType
@@ -43,17 +41,6 @@ class BsdDbShelf(Shelf[_VT]):
     def last(self) -> tuple[str, _VT]: ...
 
 class DbfilenameShelf(Shelf[_VT]):
-    if sys.version_info >= (3, 11):
-        def __init__(
-            self, filename: StrOrBytesPath, flag: _TFlags = "c", protocol: int | None = None, writeback: bool = False
-        ) -> None: ...
-    else:
-        def __init__(self, filename: str, flag: _TFlags = "c", protocol: int | None = None, writeback: bool = False) -> None: ...
+    def __init__(self, filename: str, flag: _TFlags = "c", protocol: int | None = None, writeback: bool = False) -> None: ...
 
-if sys.version_info >= (3, 11):
-    def open(
-        filename: StrOrBytesPath, flag: _TFlags = "c", protocol: int | None = None, writeback: bool = False
-    ) -> Shelf[Any]: ...
-
-else:
-    def open(filename: str, flag: _TFlags = "c", protocol: int | None = None, writeback: bool = False) -> Shelf[Any]: ...
+def open(filename: str, flag: _TFlags = "c", protocol: int | None = None, writeback: bool = False) -> Shelf[Any]: ...

@@ -474,13 +474,6 @@ if sys.version_info >= (3, 12):
         ETHERTYPE_VLAN as ETHERTYPE_VLAN,
     )
 
-    if sys.platform == "linux":
-        from _socket import ETH_P_ALL as ETH_P_ALL
-
-    if sys.platform != "linux" and sys.platform != "win32" and sys.platform != "darwin":
-        # FreeBSD >= 14.0
-        from _socket import PF_DIVERT as PF_DIVERT
-
 # Re-exported from errno
 EBADF: int
 EAGAIN: int
@@ -532,9 +525,6 @@ class AddressFamily(IntEnum):
         AF_BLUETOOTH = 32
     if sys.platform == "win32" and sys.version_info >= (3, 12):
         AF_HYPERV = 34
-    if sys.platform != "linux" and sys.platform != "win32" and sys.platform != "darwin" and sys.version_info >= (3, 12):
-        # FreeBSD >= 14.0
-        AF_DIVERT = 44
 
 AF_INET = AddressFamily.AF_INET
 AF_INET6 = AddressFamily.AF_INET6
@@ -587,9 +577,6 @@ if sys.platform != "win32" or sys.version_info >= (3, 9):
 
 if sys.platform == "win32" and sys.version_info >= (3, 12):
     AF_HYPERV = AddressFamily.AF_HYPERV
-if sys.platform != "linux" and sys.platform != "win32" and sys.platform != "darwin" and sys.version_info >= (3, 12):
-    # FreeBSD >= 14.0
-    AF_DIVERT = AddressFamily.AF_DIVERT
 
 class SocketKind(IntEnum):
     SOCK_STREAM = 1

@@ -1,8 +1,9 @@
 import sys
 from _typeshed import OptExcInfo, ProfileFunction, TraceFunction, structseq
-from _typeshed.importlib import MetaPathFinderProtocol, PathEntryFinderProtocol
 from builtins import object as _object
 from collections.abc import AsyncGenerator, Callable, Sequence
+from importlib.abc import PathEntryFinder
 from importlib.machinery import ModuleSpec
 from io import TextIOWrapper
 from types import FrameType, ModuleType, TracebackType
 from typing import Any, Final, Literal, NoReturn, Protocol, TextIO, TypeVar, final
@@ -14,6 +15,10 @@ _T = TypeVar("_T")
 _ExitCode: TypeAlias = str | int | None
 _OptExcInfo: TypeAlias = OptExcInfo  # noqa: Y047  # TODO: obsolete, remove fall 2022 or later
 
+# Intentionally omits one deprecated and one optional method of `importlib.abc.MetaPathFinder`
+class _MetaPathFinder(Protocol):
+    def find_spec(self, fullname: str, path: Sequence[str] | None, target: ModuleType | None = ..., /) -> ModuleSpec | None: ...
+
 # ----- sys variables -----
 if sys.platform != "win32":
     abiflags: str
@@ -39,13 +44,13 @@ if sys.version_info >= (3, 12):
     last_exc: BaseException  # or undefined.
 maxsize: int
 maxunicode: int
-meta_path: list[MetaPathFinderProtocol]
+meta_path: list[_MetaPathFinder]
 modules: dict[str, ModuleType]
 if sys.version_info >= (3, 10):
     orig_argv: list[str]
 path: list[str]
-path_hooks: list[Callable[[str], PathEntryFinderProtocol]]
-path_importer_cache: dict[str, PathEntryFinderProtocol | None]
+path_hooks: list[Callable[[str], PathEntryFinder]]
+path_importer_cache: dict[str, PathEntryFinder | None]
 platform: str
 if sys.version_info >= (3, 9):
     platlibdir: str

@@ -374,11 +374,7 @@ class SpooledTemporaryFile(IO[AnyStr], _SpooledTemporaryFileBase):
     def readlines(self, hint: int = ..., /) -> list[AnyStr]: ...  # type: ignore[override]
     def seek(self, offset: int, whence: int = ...) -> int: ...
     def tell(self) -> int: ...
-    if sys.version_info >= (3, 11):
-        def truncate(self, size: int | None = None) -> int: ...
-    else:
-        def truncate(self, size: int | None = None) -> None: ...  # type: ignore[override]
-
+    def truncate(self, size: int | None = None) -> None: ...  # type: ignore[override]
     @overload
     def write(self: SpooledTemporaryFile[str], s: str) -> int: ...
     @overload

29  crates/red_knot/vendor/typeshed/stdlib/types.pyi (vendored)

@@ -1,6 +1,5 @@
 import sys
 from _typeshed import SupportsKeysAndGetItem
-from _typeshed.importlib import LoaderProtocol
 from collections.abc import (
     AsyncGenerator,
     Awaitable,
@@ -17,7 +16,7 @@ from collections.abc import (
 from importlib.machinery import ModuleSpec
 
 # pytype crashes if types.MappingProxyType inherits from collections.abc.Mapping instead of typing.Mapping
-from typing import Any, ClassVar, Literal, Mapping, TypeVar, final, overload  # noqa: Y022
+from typing import Any, ClassVar, Literal, Mapping, Protocol, TypeVar, final, overload  # noqa: Y022
 from typing_extensions import ParamSpec, Self, TypeVarTuple, deprecated
 
 __all__ = [
@@ -65,11 +64,18 @@ _T2 = TypeVar("_T2")
 _KT = TypeVar("_KT")
 _VT_co = TypeVar("_VT_co", covariant=True)
 
+@final
+class _Cell:
+    def __new__(cls, contents: object = ..., /) -> Self: ...
+    def __eq__(self, value: object, /) -> bool: ...
+    __hash__: ClassVar[None]  # type: ignore[assignment]
+    cell_contents: Any
+
 # Make sure this class definition stays roughly in line with `builtins.function`
 @final
 class FunctionType:
     @property
-    def __closure__(self) -> tuple[CellType, ...] | None: ...
+    def __closure__(self) -> tuple[_Cell, ...] | None: ...
     __code__: CodeType
     __defaults__: tuple[Any, ...] | None
     __dict__: dict[str, Any]
@@ -92,7 +98,7 @@ class FunctionType:
         globals: dict[str, Any],
         name: str | None = ...,
         argdefs: tuple[object, ...] | None = ...,
-        closure: tuple[CellType, ...] | None = ...,
+        closure: tuple[_Cell, ...] | None = ...,
     ) -> Self: ...
     def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
     @overload
@@ -312,12 +318,15 @@ class SimpleNamespace:
     def __setattr__(self, name: str, value: Any, /) -> None: ...
     def __delattr__(self, name: str, /) -> None: ...
 
+class _LoaderProtocol(Protocol):
+    def load_module(self, fullname: str, /) -> ModuleType: ...
+
 class ModuleType:
     __name__: str
     __file__: str | None
     @property
     def __dict__(self) -> dict[str, Any]: ...  # type: ignore[override]
-    __loader__: LoaderProtocol | None
+    __loader__: _LoaderProtocol | None
     __package__: str | None
     __path__: MutableSequence[str]
     __spec__: ModuleSpec | None
@@ -327,12 +336,6 @@ class ModuleType:
     # using `builtins.__import__` or `importlib.import_module` less painful
     def __getattr__(self, name: str) -> Any: ...
 
-@final
-class CellType:
-    def __new__(cls, contents: object = ..., /) -> Self: ...
-    __hash__: ClassVar[None]  # type: ignore[assignment]
-    cell_contents: Any
-
 _YieldT_co = TypeVar("_YieldT_co", covariant=True)
 _SendT_contra = TypeVar("_SendT_contra", contravariant=True)
 _ReturnT_co = TypeVar("_ReturnT_co", covariant=True)
@@ -402,7 +405,7 @@ class CoroutineType(Coroutine[_YieldT_co, _SendT_contra, _ReturnT_co]):
 @final
 class MethodType:
     @property
-    def __closure__(self) -> tuple[CellType, ...] | None: ...  # inherited from the added function
+    def __closure__(self) -> tuple[_Cell, ...] | None: ...  # inherited from the added function
     @property
     def __defaults__(self) -> tuple[Any, ...] | None: ...  # inherited from the added function
     @property
@@ -567,6 +570,8 @@ def coroutine(func: Callable[_P, Generator[Any, Any, _R]]) -> Callable[_P, Await
 @overload
 def coroutine(func: _Fn) -> _Fn: ...
 
+CellType = _Cell
+
 if sys.version_info >= (3, 9):
     class GenericAlias:
         @property

@@ -8,6 +8,7 @@ import typing_extensions
 from _collections_abc import dict_items, dict_keys, dict_values
 from _typeshed import IdentityFunction, ReadableBuffer, SupportsKeysAndGetItem
 from abc import ABCMeta, abstractmethod
+from contextlib import AbstractAsyncContextManager, AbstractContextManager
 from re import Match as Match, Pattern as Pattern
 from types import (
     BuiltinFunctionType,
@@ -23,10 +24,10 @@ from types import (
 )
 from typing_extensions import Never as _Never, ParamSpec as _ParamSpec
 
-if sys.version_info >= (3, 9):
-    from types import GenericAlias
 if sys.version_info >= (3, 10):
     from types import UnionType
+if sys.version_info >= (3, 9):
+    from types import GenericAlias
 
 __all__ = [
     "AbstractSet",
@@ -401,8 +402,8 @@ class Reversible(Iterable[_T_co], Protocol[_T_co]):
     def __reversed__(self) -> Iterator[_T_co]: ...
 
 _YieldT_co = TypeVar("_YieldT_co", covariant=True)
-_SendT_contra = TypeVar("_SendT_contra", contravariant=True, default=None)
-_ReturnT_co = TypeVar("_ReturnT_co", covariant=True, default=None)
+_SendT_contra = TypeVar("_SendT_contra", contravariant=True)
+_ReturnT_co = TypeVar("_ReturnT_co", covariant=True)
 
 class Generator(Iterator[_YieldT_co], Generic[_YieldT_co, _SendT_contra, _ReturnT_co]):
     def __next__(self) -> _YieldT_co: ...
@@ -427,28 +428,24 @@ class Generator(Iterator[_YieldT_co], Generic[_YieldT_co, _SendT_contra, _Return
     @property
     def gi_yieldfrom(self) -> Generator[Any, Any, Any] | None: ...
 
-# NOTE: Prior to Python 3.13 these aliases are lacking the second _ExitT_co parameter
-if sys.version_info >= (3, 13):
-    from contextlib import AbstractAsyncContextManager as AsyncContextManager, AbstractContextManager as ContextManager
-else:
-    from contextlib import AbstractAsyncContextManager, AbstractContextManager
-    # NOTE: Technically we would like this to be able to accept a second parameter as well, just
-    # like it's counterpart in contextlib, however `typing._SpecialGenericAlias` enforces the
-    # correct number of arguments at runtime, so we would be hiding runtime errors.
-    @runtime_checkable
-    class ContextManager(AbstractContextManager[_T_co, bool | None], Protocol[_T_co]): ...
+@runtime_checkable
+class ContextManager(AbstractContextManager[_T_co, bool | None], Protocol[_T_co]): ...
 
-    @runtime_checkable
-    class AsyncContextManager(AbstractAsyncContextManager[_T_co, bool | None], Protocol[_T_co]): ...
+# NOTE: Technically we would like this to be able to accept a second parameter as well, just
+# like it's counterpart in contextlib, however `typing._SpecialGenericAlias` enforces the
+# correct number of arguments at runtime, so we would be hiding runtime errors.
+@runtime_checkable
+class AsyncContextManager(AbstractAsyncContextManager[_T_co, bool | None], Protocol[_T_co]): ...
 
 @runtime_checkable
 class Awaitable(Protocol[_T_co]):
     @abstractmethod
     def __await__(self) -> Generator[Any, Any, _T_co]: ...
 
-# Non-default variations to accommodate couroutines, and `AwaitableGenerator` having a 4th type parameter.
-_SendT_contra_nd = TypeVar("_SendT_contra_nd", contravariant=True)
-_ReturnT_co_nd = TypeVar("_ReturnT_co_nd", covariant=True)
-
-class Coroutine(Awaitable[_ReturnT_co_nd], Generic[_YieldT_co, _SendT_contra_nd, _ReturnT_co_nd]):
+class Coroutine(Awaitable[_ReturnT_co], Generic[_YieldT_co, _SendT_contra, _ReturnT_co]):
     __name__: str
     __qualname__: str
     @property
@@ -460,7 +457,7 @@ class Coroutine(Awaitable[_ReturnT_co_nd], Generic[_YieldT_co, _SendT_contra_nd,
     @property
     def cr_running(self) -> bool: ...
     @abstractmethod
-    def send(self, value: _SendT_contra_nd, /) -> _YieldT_co: ...
+    def send(self, value: _SendT_contra, /) -> _YieldT_co: ...
     @overload
     @abstractmethod
     def throw(
@@ -476,9 +473,9 @@ class Coroutine(Awaitable[_ReturnT_co_nd], Generic[_YieldT_co, _SendT_contra_nd,
 # The parameters correspond to Generator, but the 4th is the original type.
 @type_check_only
 class AwaitableGenerator(
-    Awaitable[_ReturnT_co_nd],
-    Generator[_YieldT_co, _SendT_contra_nd, _ReturnT_co_nd],
-    Generic[_YieldT_co, _SendT_contra_nd, _ReturnT_co_nd, _S],
+    Awaitable[_ReturnT_co],
+    Generator[_YieldT_co, _SendT_contra, _ReturnT_co],
+    Generic[_YieldT_co, _SendT_contra, _ReturnT_co, _S],
     metaclass=ABCMeta,
 ): ...

@@ -13,17 +13,17 @@ readme = "../../README.md"
 default-run = "ruff"
 
 [dependencies]
-ruff_cache = { workspace = true }
-ruff_diagnostics = { workspace = true }
-ruff_linter = { workspace = true, features = ["clap"] }
-ruff_macros = { workspace = true }
-ruff_notebook = { workspace = true }
-ruff_python_ast = { workspace = true }
-ruff_python_formatter = { workspace = true }
-ruff_server = { workspace = true }
-ruff_source_file = { workspace = true }
-ruff_text_size = { workspace = true }
-ruff_workspace = { workspace = true }
+ruff_cache = { path = "../ruff_cache" }
+ruff_diagnostics = { path = "../ruff_diagnostics" }
+ruff_linter = { path = "../ruff_linter", features = ["clap"] }
+ruff_macros = { path = "../ruff_macros" }
+ruff_notebook = { path = "../ruff_notebook" }
+ruff_python_ast = { path = "../ruff_python_ast" }
+ruff_python_formatter = { path = "../ruff_python_formatter" }
+ruff_server = { path = "../ruff_server" }
+ruff_source_file = { path = "../ruff_source_file" }
+ruff_text_size = { path = "../ruff_text_size" }
+ruff_workspace = { path = "../ruff_workspace" }
 
 anyhow = { workspace = true }
 argfile = { workspace = true }
@@ -60,7 +60,7 @@ wild = { workspace = true }
 
 [dev-dependencies]
 # Enable test rules during development
-ruff_linter = { workspace = true, features = ["clap", "test-rules"] }
+ruff_linter = { path = "../ruff_linter", features = ["clap", "test-rules"] }
 # Avoid writing colored snapshots when running tests from the terminal
 colored = { workspace = true, features = ["no-color"] }
 insta = { workspace = true, features = ["filters", "json"] }

@@ -111,13 +111,7 @@ pub enum Command {
         output_format: HelpFormat,
     },
     /// List or describe the available configuration options.
-    Config {
-        /// Config key to show
-        option: Option<String>,
-        /// Output format
-        #[arg(long, value_enum, default_value = "text")]
-        output_format: HelpFormat,
-    },
+    Config { option: Option<String> },
     /// List all supported upstream linters.
     Linter {
         /// Output format

@@ -1,38 +1,19 @@
 use anyhow::{anyhow, Result};
 
-use crate::args::HelpFormat;
-
 use ruff_workspace::options::Options;
 use ruff_workspace::options_base::OptionsMetadata;
 
 #[allow(clippy::print_stdout)]
-pub(crate) fn config(key: Option<&str>, format: HelpFormat) -> Result<()> {
+pub(crate) fn config(key: Option<&str>) -> Result<()> {
     match key {
-        None => {
-            let metadata = Options::metadata();
-            match format {
-                HelpFormat::Text => {
-                    println!("{metadata}");
-                }
-
-                HelpFormat::Json => {
-                    println!("{}", &serde_json::to_string_pretty(&metadata)?);
-                }
-            }
-        }
+        None => print!("{}", Options::metadata()),
         Some(key) => match Options::metadata().find(key) {
             None => {
                 return Err(anyhow!("Unknown option: {key}"));
             }
-            Some(entry) => match format {
-                HelpFormat::Text => {
-                    print!("{entry}");
-                }
-
-                HelpFormat::Json => {
-                    println!("{}", &serde_json::to_string_pretty(&entry)?);
-                }
-            },
+            Some(entry) => {
+                print!("{entry}");
+            }
         },
     }
     Ok(())

@@ -401,10 +401,7 @@ pub(crate) fn format_source(
             // Format the cell.
             let formatted =
                 format_module_source(unformatted, options.clone()).map_err(|err| {
-                    if let FormatModuleError::ParseError(mut err) = err {
-                        // Offset the error location by the start offset of the cell to report
-                        // the correct cell index.
-                        err.location += start;
+                    if let FormatModuleError::ParseError(err) = err {
                         DisplayParseError::from_source_kind(
                             err,
                             path.map(Path::to_path_buf),

@@ -180,11 +180,8 @@ pub fn run(
             }
             Ok(ExitStatus::Success)
         }
-        Command::Config {
-            option,
-            output_format,
-        } => {
-            commands::config::config(option.as_deref(), output_format)?;
+        Command::Config { option } => {
+            commands::config::config(option.as_deref())?;
             Ok(ExitStatus::Success)
         }
         Command::Linter { output_format } => {
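
Taken together, the three Rust hunks above mean the two branches accept different `ruff config` invocations; the side that keeps `output_format` supports both of the calls below (mirroring the deleted test file that follows):

```shell
# Text output, accepted on both sides:
ruff config lint.select

# JSON output, only accepted on the side that still has --output-format:
ruff config lint.select --output-format json
```
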
@@ -1,55 +0,0 @@
-//! Tests for the `ruff config` subcommand.
-use std::process::Command;
-
-use insta_cmd::{assert_cmd_snapshot, get_cargo_bin};
-
-const BIN_NAME: &str = "ruff";
-
-#[test]
-fn lint_select() {
-    assert_cmd_snapshot!(
-        Command::new(get_cargo_bin(BIN_NAME)).arg("config").arg("lint.select"), @r###"
-    success: true
-    exit_code: 0
-    ----- stdout -----
-    A list of rule codes or prefixes to enable. Prefixes can specify exact
-    rules (like `F841`), entire categories (like `F`), or anything in
-    between.
-
-    When breaking ties between enabled and disabled rules (via `select` and
-    `ignore`, respectively), more specific prefixes override less
-    specific prefixes.
-
-    Default value: ["E4", "E7", "E9", "F"]
-    Type: list[RuleSelector]
-    Example usage:
-
-    ```toml
-    # On top of the defaults (`E4`, E7`, `E9`, and `F`), enable flake8-bugbear (`B`) and flake8-quotes (`Q`).
-    select = ["E4", "E7", "E9", "F", "B", "Q"]
-    ```
-
-    ----- stderr -----
-    "###
-    );
-}
-
-#[test]
-fn lint_select_json() {
-    assert_cmd_snapshot!(
-        Command::new(get_cargo_bin(BIN_NAME)).arg("config").arg("lint.select").arg("--output-format").arg("json"), @r###"
-    success: true
-    exit_code: 0
-    ----- stdout -----
-    {
-      "doc": "A list of rule codes or prefixes to enable. Prefixes can specify exact\nrules (like `F841`), entire categories (like `F`), or anything in\nbetween.\n\nWhen breaking ties between enabled and disabled rules (via `select` and\n`ignore`, respectively), more specific prefixes override less\nspecific prefixes.",
-      "default": "[\"E4\", \"E7\", \"E9\", \"F\"]",
-      "value_type": "list[RuleSelector]",
-      "scope": null,
-      "example": "# On top of the defaults (`E4`, E7`, `E9`, and `F`), enable flake8-bugbear (`B`) and flake8-quotes (`Q`).\nselect = [\"E4\", \"E7\", \"E9\", \"F\", \"B\", \"Q\"]",
-      "deprecated": null
-    }
-
-    ----- stderr -----
-    "###
-    );
-}

@@ -38,14 +38,14 @@ serde_json = { workspace = true }
 url = { workspace = true }
 ureq = { workspace = true }
 criterion = { workspace = true, default-features = false }
-codspeed-criterion-compat = { workspace = true, default-features = false, optional = true }
+codspeed-criterion-compat = { workspace = true, default-features = false, optional = true}
 
 [dev-dependencies]
-ruff_linter = { workspace = true }
-ruff_python_ast = { workspace = true }
-ruff_python_formatter = { workspace = true }
-ruff_python_index = { workspace = true }
-ruff_python_parser = { workspace = true }
+ruff_linter = { path = "../ruff_linter" }
+ruff_python_ast = { path = "../ruff_python_ast" }
+ruff_python_formatter = { path = "../ruff_python_formatter" }
+ruff_python_index = { path = "../ruff_python_index" }
+ruff_python_parser = { path = "../ruff_python_parser" }
 
 [lints]
 workspace = true

@@ -10,7 +10,7 @@ use ruff_linter::settings::{flags, LinterSettings};
 use ruff_linter::source_kind::SourceKind;
 use ruff_linter::{registry::Rule, RuleSelector};
 use ruff_python_ast::PySourceType;
-use ruff_python_parser::{parse_program_tokens, tokenize, Mode};
+use ruff_python_parser::{lexer, parse_program_tokens, Mode};
 
 #[cfg(target_os = "windows")]
 #[global_allocator]
@@ -55,7 +55,7 @@ fn benchmark_linter(mut group: BenchmarkGroup, settings: &LinterSettings) {
                 &case,
                 |b, case| {
                     // Tokenize the source.
-                    let tokens = tokenize(case.code(), Mode::Module);
+                    let tokens: Vec<_> = lexer::lex(case.code(), Mode::Module).collect();
 
                     // Parse the source.
                     let ast = parse_program_tokens(tokens.clone(), case.code(), false).unwrap();

@@ -19,7 +19,7 @@ filetime = { workspace = true }
 seahash = { workspace = true }
 
 [dev-dependencies]
-ruff_macros = { workspace = true }
+ruff_macros = { path = "../ruff_macros" }
 
 [lints]
 workspace = true

@@ -11,18 +11,18 @@ repository = { workspace = true }
 license = { workspace = true }
 
 [dependencies]
-ruff = { workspace = true }
-ruff_diagnostics = { workspace = true }
-ruff_formatter = { workspace = true }
-ruff_linter = { workspace = true, features = ["schemars"] }
-ruff_notebook = { workspace = true }
-ruff_python_ast = { workspace = true }
-ruff_python_codegen = { workspace = true }
-ruff_python_formatter = { workspace = true }
-ruff_python_parser = { workspace = true }
-ruff_python_stdlib = { workspace = true }
-ruff_python_trivia = { workspace = true }
-ruff_workspace = { workspace = true, features = ["schemars"] }
+ruff = { path = "../ruff" }
+ruff_diagnostics = { path = "../ruff_diagnostics" }
+ruff_formatter = { path = "../ruff_formatter" }
+ruff_linter = { path = "../ruff_linter", features = ["schemars"] }
+ruff_notebook = { path = "../ruff_notebook" }
+ruff_python_ast = { path = "../ruff_python_ast" }
+ruff_python_codegen = { path = "../ruff_python_codegen" }
+ruff_python_formatter = { path = "../ruff_python_formatter" }
+ruff_python_parser = { path = "../ruff_python_parser" }
+ruff_python_stdlib = { path = "../ruff_python_stdlib" }
+ruff_python_trivia = { path = "../ruff_python_trivia" }
+ruff_workspace = { path = "../ruff_workspace", features = ["schemars"] }
 
 anyhow = { workspace = true }
 clap = { workspace = true, features = ["wrap_help"] }

@@ -14,7 +14,7 @@ license = { workspace = true }
 doctest = false
 
 [dependencies]
-ruff_text_size = { workspace = true }
+ruff_text_size = { path = "../ruff_text_size" }
 
 anyhow = { workspace = true }
 log = { workspace = true }

@@ -11,9 +11,9 @@ repository = { workspace = true }
 license = { workspace = true }
 
 [dependencies]
-ruff_cache = { workspace = true }
-ruff_macros = { workspace = true }
-ruff_text_size = { workspace = true }
+ruff_cache = { path = "../ruff_cache" }
+ruff_macros = { path = "../ruff_macros" }
+ruff_text_size = { path = "../ruff_text_size" }
 
 drop_bomb = { workspace = true }
 rustc-hash = { workspace = true }

@@ -14,7 +14,7 @@ license = { workspace = true }
 doctest = false
 
 [dependencies]
-ruff_macros = { workspace = true }
+ruff_macros = { path = "../ruff_macros" }
 
 [dev-dependencies]
 static_assertions = { workspace = true }

@@ -13,20 +13,20 @@ license = { workspace = true }
 [lib]
 
 [dependencies]
-ruff_cache = { workspace = true }
-ruff_diagnostics = { workspace = true, features = ["serde"] }
-ruff_notebook = { workspace = true }
-ruff_macros = { workspace = true }
-ruff_python_ast = { workspace = true, features = ["serde"] }
-ruff_python_codegen = { workspace = true }
-ruff_python_index = { workspace = true }
-ruff_python_literal = { workspace = true }
-ruff_python_semantic = { workspace = true }
-ruff_python_stdlib = { workspace = true }
-ruff_python_trivia = { workspace = true }
-ruff_python_parser = { workspace = true }
-ruff_source_file = { workspace = true, features = ["serde"] }
-ruff_text_size = { workspace = true }
+ruff_cache = { path = "../ruff_cache" }
+ruff_diagnostics = { path = "../ruff_diagnostics", features = ["serde"] }
+ruff_notebook = { path = "../ruff_notebook" }
+ruff_macros = { path = "../ruff_macros" }
+ruff_python_ast = { path = "../ruff_python_ast", features = ["serde"] }
+ruff_python_codegen = { path = "../ruff_python_codegen" }
+ruff_python_index = { path = "../ruff_python_index" }
+ruff_python_literal = { path = "../ruff_python_literal" }
+ruff_python_semantic = { path = "../ruff_python_semantic" }
+ruff_python_stdlib = { path = "../ruff_python_stdlib" }
+ruff_python_trivia = { path = "../ruff_python_trivia" }
+ruff_python_parser = { path = "../ruff_python_parser" }
+ruff_source_file = { path = "../ruff_source_file", features = ["serde"] }
+ruff_text_size = { path = "../ruff_text_size" }
 
 aho-corasick = { workspace = true }
 annotate-snippets = { workspace = true, features = ["color"] }

@@ -90,10 +90,3 @@ def f():
 
     def func() -> DataFrame[[DataFrame[_P, _R]], DataFrame[_P, _R]]:
         ...
-
-
-def f():
-    from pandas import DataFrame, Series
-
-    def func(self) -> DataFrame | list[Series]:
-        pass

@@ -1,4 +1,4 @@
-"""__init__.py with nonempty __all__
+"""__init__.py with __all__
 
 Unused stdlib and third party imports are unsafe removals
 
@@ -33,10 +33,10 @@ from . import aliased as aliased  # Ok: is redundant alias
 from . import exported  # Ok: is exported in __all__
 
 
-from . import unused  # F401: add to __all__
+# from . import unused  # F401: add to __all__
 
 
-from . import renamed as bees  # F401: add to __all__
+# from . import renamed as bees  # F401: add to __all__
 
 
 __all__ = ["argparse", "exported"]

@@ -1,11 +0,0 @@
-"""__init__.py with empty __all__
-"""
-
-
-from . import unused  # F401: add to __all__
-
-
-from . import renamed as bees  # F401: add to __all__
-
-
-__all__ = []
@@ -1 +0,0 @@
-# empty module imported by __init__.py for test fixture
@@ -1 +0,0 @@
-# empty module imported by __init__.py for test fixture
@@ -1,11 +0,0 @@
-"""__init__.py with mis-typed __all__
-"""
-
-
-from . import unused  # F401: recommend add to all w/o fix
-
-
-from . import renamed as bees  # F401: recommend add to all w/o fix
-
-
-__all__ = None
@@ -1 +0,0 @@
-# empty module imported by __init__.py for test fixture
@@ -1 +0,0 @@
-# empty module imported by __init__.py for test fixture
@@ -1,8 +0,0 @@
-"""__init__.py with multiple imports added to all in one edit
-"""
-
-
-from . import unused, renamed as bees  # F401: add to __all__
-
-
-__all__ = [];
@@ -1 +0,0 @@
-# empty module imported by __init__.py for test fixture
@@ -1 +0,0 @@
-# empty module imported by __init__.py for test fixture
@@ -1,16 +0,0 @@
-"""__init__.py with __all__ populated by conditional plus-eq
-
-multiple __all__ so cannot offer a fix to add to them
-"""
-
-import sys
-
-from . import unused, exported, renamed as bees
-
-if sys.version_info > (3, 9):
-    from . import also_exported
-
-__all__ = ["exported"]
-
-if sys.version_info >= (3, 9):
-    __all__ += ["also_exported"]
@@ -1 +0,0 @@
-# empty module imported by __init__.py for test fixture
@@ -1 +0,0 @@
-# empty module imported by __init__.py for test fixture
@@ -1 +0,0 @@
-# empty module imported by __init__.py for test fixture
@@ -1 +0,0 @@
-# empty module imported by __init__.py for test fixture

@@ -103,7 +103,7 @@ def f():
 
 
 def f():
-    # Invalid - nonexistent error code with multibyte character
+    # Invalid - nonexistant error code with multibyte character
     d = 1  # noqa: F841, E50
     e = 1  # noqa: E50

@@ -1065,17 +1065,13 @@ pub(crate) fn expression(expr: &Expr, checker: &mut Checker) {
                 pyflakes::rules::invalid_print_syntax(checker, left);
             }
         }
-        Expr::BinOp(
-            bin_op @ ast::ExprBinOp {
-                left,
-                op: Operator::Mod,
-                right,
-                range: _,
-            },
-        ) => {
-            if let Expr::StringLiteral(format_string @ ast::ExprStringLiteral { value, .. }) =
-                left.as_ref()
-            {
+        Expr::BinOp(ast::ExprBinOp {
+            left,
+            op: Operator::Mod,
+            right,
+            range: _,
+        }) => {
+            if let Expr::StringLiteral(ast::ExprStringLiteral { value, .. }) = left.as_ref() {
                 if checker.any_enabled(&[
                     Rule::PercentFormatInvalidFormat,
                     Rule::PercentFormatExpectedMapping,
@@ -1155,14 +1151,10 @@ pub(crate) fn expression(expr: &Expr, checker: &mut Checker) {
                     pyupgrade::rules::printf_string_formatting(checker, expr, right);
                 }
                 if checker.enabled(Rule::BadStringFormatCharacter) {
-                    pylint::rules::bad_string_format_character::percent(
-                        checker,
-                        expr,
-                        format_string,
-                    );
+                    pylint::rules::bad_string_format_character::percent(checker, expr);
                 }
                 if checker.enabled(Rule::BadStringFormatType) {
-                    pylint::rules::bad_string_format_type(checker, bin_op, format_string);
+                    pylint::rules::bad_string_format_type(checker, expr, right);
                 }
                 if checker.enabled(Rule::HardcodedSQLExpression) {
                     flake8_bandit::rules::hardcoded_sql_expression(checker, expr);

@@ -2,7 +2,7 @@ use ruff_python_ast::StringLike;
 
 use crate::checkers::ast::Checker;
 use crate::codes::Rule;
-use crate::rules::{flake8_bandit, flake8_pyi, flake8_quotes, pycodestyle, ruff};
+use crate::rules::{flake8_bandit, flake8_pyi, flake8_quotes, ruff};
 
 /// Run lint rules over a [`StringLike`] syntax nodes.
 pub(crate) fn string_like(string_like: StringLike, checker: &mut Checker) {
@@ -36,7 +36,4 @@ pub(crate) fn string_like(string_like: StringLike, checker: &mut Checker) {
     if checker.enabled(Rule::AvoidableEscapedQuote) && checker.settings.flake8_quotes.avoid_escape {
         flake8_quotes::rules::avoidable_escaped_quote(checker, string_like);
     }
-    if checker.enabled(Rule::InvalidEscapeSequence) {
-        pycodestyle::rules::invalid_escape_sequence(checker, string_like);
-    }
 }

@@ -5,13 +5,13 @@ use std::path::Path;
 use ruff_notebook::CellOffsets;
 use ruff_python_ast::PySourceType;
 use ruff_python_codegen::Stylist;
+use ruff_python_parser::lexer::LexResult;
 
 use ruff_diagnostics::Diagnostic;
 use ruff_python_index::Indexer;
 use ruff_source_file::Locator;
 
 use crate::directives::TodoComment;
-use crate::linter::TokenSource;
 use crate::registry::{AsRule, Rule};
 use crate::rules::pycodestyle::rules::BlankLinesChecker;
 use crate::rules::{
@@ -22,7 +22,7 @@ use crate::settings::LinterSettings;
 
 #[allow(clippy::too_many_arguments)]
 pub(crate) fn check_tokens(
-    tokens: &TokenSource,
+    tokens: &[LexResult],
     path: &Path,
     locator: &Locator,
     indexer: &Indexer,
@@ -42,7 +42,7 @@ pub(crate) fn check_tokens(
         Rule::BlankLinesBeforeNestedDefinition,
     ]) {
         BlankLinesChecker::new(locator, stylist, settings, source_type, cell_offsets)
-            .check_lines(tokens.kinds(), &mut diagnostics);
+            .check_lines(tokens, &mut diagnostics);
     }
 
     if settings.rules.enabled(Rule::BlanketTypeIgnore) {
@@ -75,6 +75,18 @@ pub(crate) fn check_tokens(
         pyupgrade::rules::unnecessary_coding_comment(&mut diagnostics, locator, indexer);
     }
 
+    if settings.rules.enabled(Rule::InvalidEscapeSequence) {
+        for (tok, range) in tokens.iter().flatten() {
+            pycodestyle::rules::invalid_escape_sequence(
+                &mut diagnostics,
+                locator,
+                indexer,
+                tok,
+                *range,
+            );
+        }
+    }
+
     if settings.rules.enabled(Rule::TabIndentation) {
         pycodestyle::rules::tab_indentation(&mut diagnostics, locator, indexer);
     }
@@ -86,8 +98,8 @@ pub(crate) fn check_tokens(
         Rule::InvalidCharacterNul,
         Rule::InvalidCharacterZeroWidthSpace,
     ]) {
-        for (token, range) in tokens.kinds() {
-            pylint::rules::invalid_string_characters(&mut diagnostics, token, range, locator);
+        for (tok, range) in tokens.iter().flatten() {
+            pylint::rules::invalid_string_characters(&mut diagnostics, tok, *range, locator);
         }
     }
 
@@ -98,7 +110,7 @@ pub(crate) fn check_tokens(
    ]) {
         pycodestyle::rules::compound_statements(
             &mut diagnostics,
-            tokens.kinds(),
+            tokens,
             locator,
             indexer,
             source_type,
@@ -112,7 +124,7 @@ pub(crate) fn check_tokens(
     ]) {
         flake8_implicit_str_concat::rules::implicit(
             &mut diagnostics,
-            tokens.kinds(),
+            tokens,
             settings,
             locator,
             indexer,
@@ -124,11 +136,11 @@ pub(crate) fn check_tokens(
         Rule::TrailingCommaOnBareTuple,
         Rule::ProhibitedTrailingComma,
     ]) {
-        flake8_commas::rules::trailing_commas(&mut diagnostics, tokens.kinds(), locator, indexer);
+        flake8_commas::rules::trailing_commas(&mut diagnostics, tokens, locator, indexer);
    }
 
     if settings.rules.enabled(Rule::ExtraneousParentheses) {
-        pyupgrade::rules::extraneous_parentheses(&mut diagnostics, tokens.kinds(), locator);
+        pyupgrade::rules::extraneous_parentheses(&mut diagnostics, tokens, locator);
     }
 
     if source_type.is_stub() && settings.rules.enabled(Rule::TypeCommentInStub) {
@@ -172,7 +184,7 @@ pub(crate) fn check_tokens(
     }
 
     if settings.rules.enabled(Rule::TooManyNewlinesAtEndOfFile) {
-        pycodestyle::rules::too_many_newlines_at_end_of_file(&mut diagnostics, tokens.kinds());
+        pycodestyle::rules::too_many_newlines_at_end_of_file(&mut diagnostics, tokens);
     }
 
     diagnostics.retain(|diagnostic| settings.rules.enabled(diagnostic.kind.rule()));

@@ -131,7 +131,7 @@ fn extract_noqa_line_for(lxr: &[LexResult], locator: &Locator, indexer: &Indexer
 
             // For multi-line strings, we expect `noqa` directives on the last line of the
             // string.
-            Tok::String { flags, .. } if flags.is_triple_quoted() => {
+            Tok::String { kind, .. } if kind.is_triple_quoted() => {
                 if locator.contains_line_break(*range) {
                     string_mappings.push(TextRange::new(
                         locator.line_start(range.start()),

@@ -4,26 +4,27 @@
|
||||
use std::iter::FusedIterator;
|
||||
|
||||
use ruff_python_ast::{self as ast, Stmt, Suite};
|
||||
use ruff_python_parser::{TokenKind, TokenKindIter};
|
||||
use ruff_python_parser::lexer::LexResult;
|
||||
use ruff_python_parser::Tok;
|
||||
use ruff_text_size::{Ranged, TextSize};
|
||||
|
||||
use ruff_python_ast::statement_visitor::{walk_stmt, StatementVisitor};
|
||||
use ruff_source_file::{Locator, UniversalNewlineIterator};
|
||||
|
||||
/// Extract doc lines (standalone comments) from a token sequence.
|
||||
pub(crate) fn doc_lines_from_tokens(tokens: TokenKindIter) -> DocLines {
|
||||
DocLines::new(tokens)
|
||||
pub(crate) fn doc_lines_from_tokens(lxr: &[LexResult]) -> DocLines {
|
||||
DocLines::new(lxr)
|
||||
}
|
||||
|
||||
pub(crate) struct DocLines<'a> {
|
||||
inner: TokenKindIter<'a>,
|
||||
inner: std::iter::Flatten<core::slice::Iter<'a, LexResult>>,
|
||||
prev: TextSize,
|
||||
}
|
||||
|
||||
impl<'a> DocLines<'a> {
|
||||
fn new(tokens: TokenKindIter<'a>) -> Self {
|
||||
fn new(lxr: &'a [LexResult]) -> Self {
|
||||
Self {
|
||||
inner: tokens,
|
||||
inner: lxr.iter().flatten(),
|
||||
prev: TextSize::default(),
|
||||
}
|
||||
}
|
||||
@@ -38,15 +39,15 @@ impl Iterator for DocLines<'_> {
|
||||
let (tok, range) = self.inner.next()?;
|
||||
|
||||
match tok {
|
||||
TokenKind::Comment => {
|
||||
Tok::Comment(..) => {
|
||||
if at_start_of_line {
|
||||
break Some(range.start());
|
||||
}
|
||||
}
|
||||
TokenKind::Newline | TokenKind::NonLogicalNewline => {
|
||||
Tok::Newline | Tok::NonLogicalNewline => {
|
||||
at_start_of_line = true;
|
||||
}
|
||||
TokenKind::Indent | TokenKind::Dedent => {
|
||||
Tok::Indent | Tok::Dedent => {
|
||||
// ignore
|
||||
}
|
||||
_ => {
|
||||
|
||||
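A minimal, self-contained sketch of the `DocLines` rewrite above: the field type changes from a `TokenKindIter` to a flattened slice iterator, which the struct can store directly. `LexResult` is simplified here; the real one carries a `LexicalError`.

```rust
type LexResult = Result<(char, u32), ()>;

struct DocLines<'a> {
    inner: std::iter::Flatten<std::slice::Iter<'a, LexResult>>,
}

impl<'a> DocLines<'a> {
    fn new(lxr: &'a [LexResult]) -> Self {
        Self {
            inner: lxr.iter().flatten(),
        }
    }
}

impl Iterator for DocLines<'_> {
    type Item = u32;

    fn next(&mut self) -> Option<Self::Item> {
        // Yield the offset of each successfully lexed token.
        let (_tok, offset) = self.inner.next()?;
        Some(*offset)
    }
}

fn main() {
    let lxr: Vec<LexResult> = vec![Ok(('#', 0)), Err(()), Ok(('x', 5))];
    let offsets: Vec<u32> = DocLines::new(&lxr).collect();
    assert_eq!(offsets, vec![0, 5]);
}
```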
@@ -1,12 +1,10 @@
|
||||
//! Interface for generating fix edits from higher-level actions (e.g., "remove an argument").
|
||||
|
||||
use std::borrow::Cow;
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
|
||||
use ruff_diagnostics::Edit;
|
||||
use ruff_python_ast::parenthesize::parenthesized_range;
|
||||
use ruff_python_ast::{self as ast, Arguments, ExceptHandler, Expr, ExprList, Stmt};
|
||||
use ruff_python_ast::{self as ast, Arguments, ExceptHandler, Stmt};
|
||||
use ruff_python_ast::{AnyNodeRef, ArgOrKeyword};
|
||||
use ruff_python_codegen::Stylist;
|
||||
use ruff_python_index::Indexer;
|
||||
@@ -126,7 +124,7 @@ pub(crate) fn remove_unused_imports<'a>(
|
||||
|
||||
/// Edits to make the specified imports explicit, e.g. change `import x` to `import x as x`.
|
||||
pub(crate) fn make_redundant_alias<'a>(
|
||||
member_names: impl Iterator<Item = Cow<'a, str>>,
|
||||
member_names: impl Iterator<Item = &'a str>,
|
||||
stmt: &Stmt,
|
||||
) -> Vec<Edit> {
|
||||
let aliases = match stmt {
|
||||
@@ -146,53 +144,6 @@ pub(crate) fn make_redundant_alias<'a>(
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Fix to add the specified imports to the `__all__` export list.
|
||||
pub(crate) fn add_to_dunder_all<'a>(
|
||||
names: impl Iterator<Item = &'a str>,
|
||||
expr: &Expr,
|
||||
stylist: &Stylist,
|
||||
) -> Vec<Edit> {
|
||||
let (insertion_point, export_prefix_length) = match expr {
|
||||
Expr::List(ExprList { elts, range, .. }) => (
|
||||
elts.last()
|
||||
.map_or(range.end() - "]".text_len(), Ranged::end),
|
||||
elts.len(),
|
||||
),
|
||||
Expr::Tuple(tup) if tup.parenthesized => (
|
||||
tup.elts
|
||||
.last()
|
||||
.map_or(tup.end() - ")".text_len(), Ranged::end),
|
||||
tup.elts.len(),
|
||||
),
|
||||
Expr::Tuple(tup) if !tup.parenthesized => (
|
||||
tup.elts
|
||||
.last()
|
||||
.expect("unparenthesized empty tuple is not possible")
|
||||
.range()
|
||||
.end(),
|
||||
tup.elts.len(),
|
||||
),
|
||||
_ => {
|
||||
// we don't know how to insert into this expression
|
||||
return vec![];
|
||||
}
|
||||
};
|
||||
let quote = stylist.quote();
|
||||
let mut edits: Vec<_> = names
|
||||
.enumerate()
|
||||
.map(|(offset, name)| match export_prefix_length + offset {
|
||||
0 => Edit::insertion(format!("{quote}{name}{quote}"), insertion_point),
|
||||
_ => Edit::insertion(format!(", {quote}{name}{quote}"), insertion_point),
|
||||
})
|
||||
.collect();
|
||||
if let Expr::Tuple(tup) = expr {
|
||||
if tup.parenthesized && export_prefix_length + edits.len() == 1 {
|
||||
edits.push(Edit::insertion(",".to_string(), insertion_point));
|
||||
}
|
||||
}
|
||||
edits
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub(crate) enum Parentheses {
|
||||
/// Remove parentheses, if the removed argument is the only argument left.
|
||||
@@ -526,20 +477,14 @@ fn all_lines_fit(
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use anyhow::{anyhow, Result};
|
||||
use std::borrow::Cow;
|
||||
use test_case::test_case;
|
||||
use anyhow::Result;
|
||||
|
||||
use ruff_diagnostics::{Diagnostic, Edit, Fix};
|
||||
use ruff_python_codegen::Stylist;
|
||||
use ruff_python_parser::{lexer, parse_expression, parse_suite, Mode};
|
||||
use ruff_diagnostics::Edit;
|
||||
use ruff_python_parser::parse_suite;
|
||||
use ruff_source_file::Locator;
|
||||
use ruff_text_size::{Ranged, TextRange, TextSize};
|
||||
|
||||
use crate::fix::apply_fixes;
|
||||
use crate::fix::edits::{
|
||||
add_to_dunder_all, make_redundant_alias, next_stmt_break, trailing_semicolon,
|
||||
};
|
||||
use crate::fix::edits::{make_redundant_alias, next_stmt_break, trailing_semicolon};
|
||||
|
||||
#[test]
|
||||
fn find_semicolon() -> Result<()> {
|
||||
@@ -617,7 +562,7 @@ x = 1 \
|
||||
let program = parse_suite(contents).unwrap();
|
||||
let stmt = program.first().unwrap();
|
||||
assert_eq!(
|
||||
make_redundant_alias(["x"].into_iter().map(Cow::from), stmt),
|
||||
make_redundant_alias(["x"].into_iter(), stmt),
|
||||
vec![Edit::range_replacement(
|
||||
String::from("x as x"),
|
||||
TextRange::new(TextSize::new(7), TextSize::new(8)),
|
||||
@@ -625,7 +570,7 @@ x = 1 \
|
||||
"make just one item redundant"
|
||||
);
|
||||
assert_eq!(
|
||||
make_redundant_alias(vec!["x", "y"].into_iter().map(Cow::from), stmt),
|
||||
make_redundant_alias(vec!["x", "y"].into_iter(), stmt),
|
||||
vec![Edit::range_replacement(
|
||||
String::from("x as x"),
|
||||
TextRange::new(TextSize::new(7), TextSize::new(8)),
|
||||
@@ -633,7 +578,7 @@ x = 1 \
|
||||
"the second item is already a redundant alias"
|
||||
);
|
||||
assert_eq!(
|
||||
make_redundant_alias(vec!["x", "z"].into_iter().map(Cow::from), stmt),
|
||||
make_redundant_alias(vec!["x", "z"].into_iter(), stmt),
|
||||
vec![Edit::range_replacement(
|
||||
String::from("x as x"),
|
||||
TextRange::new(TextSize::new(7), TextSize::new(8)),
|
||||
@@ -641,47 +586,4 @@ x = 1 \
|
||||
"the third item is already aliased to something else"
|
||||
);
|
||||
}
|
||||
|
||||
#[test_case("()", &["x", "y"], r#"("x", "y")"# ; "2 into empty tuple")]
|
||||
#[test_case("()", &["x"], r#"("x",)"# ; "1 into empty tuple adding a trailing comma")]
|
||||
#[test_case("[]", &["x", "y"], r#"["x", "y"]"# ; "2 into empty list")]
|
||||
#[test_case("[]", &["x"], r#"["x"]"# ; "1 into empty list")]
|
||||
#[test_case(r#""a", "b""#, &["x", "y"], r#""a", "b", "x", "y""# ; "2 into unparenthesized tuple")]
|
||||
#[test_case(r#""a", "b""#, &["x"], r#""a", "b", "x""# ; "1 into unparenthesized tuple")]
|
||||
#[test_case(r#""a", "b","#, &["x", "y"], r#""a", "b", "x", "y","# ; "2 into unparenthesized tuple w/trailing comma")]
|
||||
#[test_case(r#""a", "b","#, &["x"], r#""a", "b", "x","# ; "1 into unparenthesized tuple w/trailing comma")]
|
||||
#[test_case(r#"("a", "b")"#, &["x", "y"], r#"("a", "b", "x", "y")"# ; "2 into nonempty tuple")]
|
||||
#[test_case(r#"("a", "b")"#, &["x"], r#"("a", "b", "x")"# ; "1 into nonempty tuple")]
|
||||
#[test_case(r#"("a", "b",)"#, &["x", "y"], r#"("a", "b", "x", "y",)"# ; "2 into nonempty tuple w/trailing comma")]
|
||||
#[test_case(r#"("a", "b",)"#, &["x"], r#"("a", "b", "x",)"# ; "1 into nonempty tuple w/trailing comma")]
|
||||
#[test_case(r#"["a", "b",]"#, &["x", "y"], r#"["a", "b", "x", "y",]"# ; "2 into nonempty list w/trailing comma")]
|
||||
#[test_case(r#"["a", "b",]"#, &["x"], r#"["a", "b", "x",]"# ; "1 into nonempty list w/trailing comma")]
|
||||
#[test_case(r#"["a", "b"]"#, &["x", "y"], r#"["a", "b", "x", "y"]"# ; "2 into nonempty list")]
|
||||
#[test_case(r#"["a", "b"]"#, &["x"], r#"["a", "b", "x"]"# ; "1 into nonempty list")]
|
||||
fn add_to_dunder_all_test(raw: &str, names: &[&str], expect: &str) -> Result<()> {
|
||||
let locator = Locator::new(raw);
|
||||
let edits = {
|
||||
let expr = parse_expression(raw)?;
|
||||
let stylist = Stylist::from_tokens(
|
||||
&lexer::lex(raw, Mode::Expression).collect::<Vec<_>>(),
|
||||
&locator,
|
||||
);
|
||||
// SUT
|
||||
add_to_dunder_all(names.iter().copied(), &expr, &stylist)
|
||||
};
|
||||
let diag = {
|
||||
use crate::rules::pycodestyle::rules::MissingNewlineAtEndOfFile;
|
||||
let mut iter = edits.into_iter();
|
||||
Diagnostic::new(
|
||||
MissingNewlineAtEndOfFile, // The choice of rule here is arbitrary.
|
||||
TextRange::default(),
|
||||
)
|
||||
.with_fix(Fix::safe_edits(
|
||||
iter.next().ok_or(anyhow!("expected edits nonempty"))?,
|
||||
iter,
|
||||
))
|
||||
};
|
||||
assert_eq!(apply_fixes([diag].iter(), &locator).code, expect);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
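A hedged sketch of the signature simplification above: with `Item = &str`, callers pass string slices directly, whereas the `Cow`-based signature forced a `.map(Cow::from)` adapter at every call site (as the test hunks show). The function names here are illustrative, not the real API.

```rust
use std::borrow::Cow;

fn takes_cow<'a>(names: impl Iterator<Item = Cow<'a, str>>) -> usize {
    names.count()
}

fn takes_str<'a>(names: impl Iterator<Item = &'a str>) -> usize {
    names.count()
}

fn main() {
    let names = ["x", "y"];
    // The `Cow`-based signature needs an adapter at the call site...
    assert_eq!(takes_cow(names.iter().copied().map(Cow::from)), 2);
    // ...while the `&str`-based one does not.
    assert_eq!(takes_str(names.iter().copied()), 2);
}
```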
@@ -321,6 +321,7 @@ mod tests {

use ruff_python_ast::PySourceType;
use ruff_python_codegen::Stylist;
use ruff_python_parser::lexer::LexResult;
use ruff_python_parser::{parse_suite, Mode};
use ruff_source_file::{LineEnding, Locator};
use ruff_text_size::TextSize;
@@ -331,7 +332,7 @@ mod tests {
fn start_of_file() -> Result<()> {
fn insert(contents: &str) -> Result<Insertion> {
let program = parse_suite(contents)?;
let tokens = ruff_python_parser::tokenize(contents, Mode::Module);
let tokens: Vec<LexResult> = ruff_python_parser::tokenize(contents, Mode::Module);
let locator = Locator::new(contents);
let stylist = Stylist::from_tokens(&tokens, &locator);
Ok(Insertion::start_of_file(&program, &locator, &stylist))
@@ -442,7 +443,7 @@ x = 1
#[test]
fn start_of_block() {
fn insert(contents: &str, offset: TextSize) -> Insertion {
let tokens = ruff_python_parser::tokenize(contents, Mode::Module);
let tokens: Vec<LexResult> = ruff_python_parser::tokenize(contents, Mode::Module);
let locator = Locator::new(contents);
let stylist = Stylist::from_tokens(&tokens, &locator);
Insertion::start_of_block(offset, &locator, &stylist, PySourceType::default())

@@ -14,7 +14,7 @@ use ruff_python_ast::{PySourceType, Suite};
use ruff_python_codegen::Stylist;
use ruff_python_index::Indexer;
use ruff_python_parser::lexer::LexResult;
use ruff_python_parser::{AsMode, ParseError, TokenKindIter, Tokens};
use ruff_python_parser::{AsMode, ParseError};
use ruff_source_file::{Locator, SourceFileBuilder};
use ruff_text_size::Ranged;

@@ -93,7 +93,7 @@ pub fn check_path(
let use_doc_lines = settings.rules.enabled(Rule::DocLineTooLong);
let mut doc_lines = vec![];
if use_doc_lines {
doc_lines.extend(doc_lines_from_tokens(tokens.kinds()));
doc_lines.extend(doc_lines_from_tokens(&tokens));
}

// Run the token-based rules.
@@ -353,7 +353,7 @@ pub fn add_noqa_to_path(
let contents = source_kind.source_code();

// Tokenize once.
let tokens = ruff_python_parser::tokenize(contents, source_type.as_mode());
let tokens: Vec<LexResult> = ruff_python_parser::tokenize(contents, source_type.as_mode());

// Map row and column locations to byte slices (lazily).
let locator = Locator::new(contents);
@@ -518,7 +518,8 @@ pub fn lint_fix<'a>(
// Continuously fix until the source code stabilizes.
loop {
// Tokenize once.
let tokens = ruff_python_parser::tokenize(transformed.source_code(), source_type.as_mode());
let tokens: Vec<LexResult> =
ruff_python_parser::tokenize(transformed.source_code(), source_type.as_mode());

// Map row and column locations to byte slices (lazily).
let locator = Locator::new(transformed.source_code());
@@ -714,7 +715,7 @@ impl<'a> ParseSource<'a> {
#[derive(Debug, Clone)]
pub enum TokenSource<'a> {
/// Use the precomputed tokens to generate the AST.
Tokens(Tokens),
Tokens(Vec<LexResult>),
/// Use the precomputed tokens and AST.
Precomputed {
tokens: &'a [LexResult],
@@ -722,18 +723,6 @@ pub enum TokenSource<'a> {
},
}

impl TokenSource<'_> {
/// Returns an iterator over the [`TokenKind`] and the corresponding range.
///
/// [`TokenKind`]: ruff_python_parser::TokenKind
pub fn kinds(&self) -> TokenKindIter {
match self {
TokenSource::Tokens(tokens) => tokens.kinds(),
TokenSource::Precomputed { tokens, .. } => TokenKindIter::new(tokens),
}
}
}

impl Deref for TokenSource<'_> {
type Target = [LexResult];
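A sketch of the `TokenSource` change: the `kinds()` accessor is dropped and consumers rely on `Deref<Target = [LexResult]>` instead, so a `&TokenSource` coerces to `&[LexResult]` wherever a slice is expected. The types below are simplified stand-ins for the real parser types.

```rust
use std::ops::Deref;

type LexResult = Result<(u8, u32), ()>;

enum TokenSource<'a> {
    Tokens(Vec<LexResult>),
    Precomputed { tokens: &'a [LexResult] },
}

impl Deref for TokenSource<'_> {
    type Target = [LexResult];

    fn deref(&self) -> &Self::Target {
        match self {
            TokenSource::Tokens(tokens) => tokens,
            TokenSource::Precomputed { tokens } => tokens,
        }
    }
}

fn count_ok(tokens: &[LexResult]) -> usize {
    tokens.iter().flatten().count()
}

fn main() {
    let owned = TokenSource::Tokens(vec![Ok((1, 0)), Err(())]);
    // Deref coercion: `&owned` becomes `&[LexResult]`.
    assert_eq!(count_ok(&owned), 1);

    let precomputed: [LexResult; 1] = [Ok((2, 1))];
    let borrowed = TokenSource::Precomputed { tokens: &precomputed };
    assert_eq!(count_ok(&borrowed), 1);
}
```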
@@ -253,7 +253,7 @@ impl Display for DisplayParseError {
ErrorLocation::Cell(cell, location) => {
write!(
f,
"cell {cell}{colon}{row}{colon}{column}{colon} {inner}",
"{cell}{colon}{row}{colon}{column}{colon} {inner}",
cell = cell,
row = location.row,
column = location.column,

@@ -270,6 +270,7 @@ impl Rule {
| Rule::InvalidCharacterNul
| Rule::InvalidCharacterSub
| Rule::InvalidCharacterZeroWidthSpace
| Rule::InvalidEscapeSequence
| Rule::InvalidTodoCapitalization
| Rule::InvalidTodoTag
| Rule::LineContainsFixme

@@ -57,7 +57,7 @@ pub(crate) fn hardcoded_bind_all_interfaces(checker: &mut Checker, string: Strin
}
}
ast::FStringPart::FString(f_string) => {
for literal in f_string.elements.literals() {
for literal in f_string.literals() {
if &**literal == "0.0.0.0" {
checker.diagnostics.push(Diagnostic::new(
HardcodedBindAllInterfaces,

@@ -64,7 +64,7 @@ pub(crate) fn hardcoded_tmp_directory(checker: &mut Checker, string: StringLike)
check(checker, literal, literal.range());
}
ast::FStringPart::FString(f_string) => {
for literal in f_string.elements.literals() {
for literal in f_string.literals() {
check(checker, literal, literal.range());
}
}
@@ -2,7 +2,8 @@ use ruff_diagnostics::{AlwaysFixableViolation, Violation};
use ruff_diagnostics::{Diagnostic, Edit, Fix};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_index::Indexer;
use ruff_python_parser::{TokenKind, TokenKindIter};
use ruff_python_parser::lexer::LexResult;
use ruff_python_parser::Tok;
use ruff_source_file::Locator;
use ruff_text_size::{Ranged, TextRange};

@@ -51,26 +52,26 @@ impl Token {
}
}

impl From<(TokenKind, TextRange)> for Token {
fn from((tok, range): (TokenKind, TextRange)) -> Self {
impl From<(&Tok, TextRange)> for Token {
fn from((tok, range): (&Tok, TextRange)) -> Self {
let ty = match tok {
TokenKind::Name => TokenType::Named,
TokenKind::String => TokenType::String,
TokenKind::Newline => TokenType::Newline,
TokenKind::NonLogicalNewline => TokenType::NonLogicalNewline,
TokenKind::Lpar => TokenType::OpeningBracket,
TokenKind::Rpar => TokenType::ClosingBracket,
TokenKind::Lsqb => TokenType::OpeningSquareBracket,
TokenKind::Rsqb => TokenType::ClosingBracket,
TokenKind::Colon => TokenType::Colon,
TokenKind::Comma => TokenType::Comma,
TokenKind::Lbrace => TokenType::OpeningCurlyBracket,
TokenKind::Rbrace => TokenType::ClosingBracket,
TokenKind::Def => TokenType::Def,
TokenKind::For => TokenType::For,
TokenKind::Lambda => TokenType::Lambda,
Tok::Name { .. } => TokenType::Named,
Tok::String { .. } => TokenType::String,
Tok::Newline => TokenType::Newline,
Tok::NonLogicalNewline => TokenType::NonLogicalNewline,
Tok::Lpar => TokenType::OpeningBracket,
Tok::Rpar => TokenType::ClosingBracket,
Tok::Lsqb => TokenType::OpeningSquareBracket,
Tok::Rsqb => TokenType::ClosingBracket,
Tok::Colon => TokenType::Colon,
Tok::Comma => TokenType::Comma,
Tok::Lbrace => TokenType::OpeningCurlyBracket,
Tok::Rbrace => TokenType::ClosingBracket,
Tok::Def => TokenType::Def,
Tok::For => TokenType::For,
Tok::Lambda => TokenType::Lambda,
// Import treated like a function.
TokenKind::Import => TokenType::Named,
Tok::Import => TokenType::Named,
_ => TokenType::Irrelevant,
};
#[allow(clippy::inconsistent_struct_constructor)]
@@ -226,23 +227,27 @@ impl AlwaysFixableViolation for ProhibitedTrailingComma {
/// COM812, COM818, COM819
pub(crate) fn trailing_commas(
diagnostics: &mut Vec<Diagnostic>,
tokens: TokenKindIter,
tokens: &[LexResult],
locator: &Locator,
indexer: &Indexer,
) {
let mut fstrings = 0u32;
let tokens = tokens.filter_map(|(token, tok_range)| {
match token {
let tokens = tokens.iter().filter_map(|result| {
let Ok((tok, tok_range)) = result else {
return None;
};

match tok {
// Completely ignore comments -- they just interfere with the logic.
TokenKind::Comment => None,
Tok::Comment(_) => None,
// F-strings are handled as `String` token type with the complete range
// of the outermost f-string. This means that the expression inside the
// f-string is not checked for trailing commas.
TokenKind::FStringStart => {
Tok::FStringStart(_) => {
fstrings = fstrings.saturating_add(1);
None
}
TokenKind::FStringEnd => {
Tok::FStringEnd => {
fstrings = fstrings.saturating_sub(1);
if fstrings == 0 {
indexer
@@ -255,7 +260,7 @@ pub(crate) fn trailing_commas(
}
_ => {
if fstrings == 0 {
Some(Token::from((token, tok_range)))
Some(Token::from((tok, *tok_range)))
} else {
None
}
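A sketch of the filtering pattern adopted in `trailing_commas` above: lex errors are dropped with a `let ... else`, comments are ignored, and a saturating depth counter suppresses everything inside f-strings. Types are simplified stand-ins.

```rust
enum Tok {
    FStringStart,
    FStringEnd,
    Comment,
    Comma,
}

type LexResult = Result<(Tok, u32), ()>;

fn significant(lxr: &[LexResult]) -> Vec<u32> {
    let mut fstrings = 0u32;
    lxr.iter()
        .filter_map(|result| {
            // Skip tokens that failed to lex.
            let Ok((tok, range)) = result else {
                return None;
            };
            match tok {
                Tok::Comment => None,
                Tok::FStringStart => {
                    fstrings = fstrings.saturating_add(1);
                    None
                }
                Tok::FStringEnd => {
                    fstrings = fstrings.saturating_sub(1);
                    None
                }
                // Only tokens outside any f-string are significant.
                _ => (fstrings == 0).then_some(*range),
            }
        })
        .collect()
}

fn main() {
    let lxr: Vec<LexResult> = vec![
        Ok((Tok::Comma, 1)),
        Ok((Tok::FStringStart, 2)),
        Ok((Tok::Comma, 3)), // inside an f-string: suppressed
        Ok((Tok::FStringEnd, 4)),
        Err(()),
        Ok((Tok::Comma, 5)),
    ];
    assert_eq!(significant(&lxr), vec![1, 5]);
}
```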
@@ -118,11 +118,7 @@ pub(crate) fn call_datetime_strptime_without_zone(checker: &mut Checker, call: &
}
}
ast::FStringPart::FString(f_string) => {
if f_string
.elements
.literals()
.any(|literal| literal.contains("%z"))
{
if f_string.literals().any(|literal| literal.contains("%z")) {
return;
}
}

@@ -4,9 +4,10 @@ use ruff_diagnostics::{Diagnostic, Edit, Fix, FixAvailability, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::str::{leading_quote, trailing_quote};
use ruff_python_index::Indexer;
use ruff_python_parser::{TokenKind, TokenKindIter};
use ruff_python_parser::lexer::LexResult;
use ruff_python_parser::Tok;
use ruff_source_file::Locator;
use ruff_text_size::TextRange;
use ruff_text_size::{Ranged, TextRange};

use crate::settings::LinterSettings;

@@ -92,34 +93,36 @@ impl Violation for MultiLineImplicitStringConcatenation {
/// ISC001, ISC002
pub(crate) fn implicit(
diagnostics: &mut Vec<Diagnostic>,
tokens: TokenKindIter,
tokens: &[LexResult],
settings: &LinterSettings,
locator: &Locator,
indexer: &Indexer,
) {
for ((a_tok, a_range), (b_tok, b_range)) in tokens
.filter(|(token, _)| {
*token != TokenKind::Comment
.iter()
.flatten()
.filter(|(tok, _)| {
!tok.is_comment()
&& (settings.flake8_implicit_str_concat.allow_multiline
|| *token != TokenKind::NonLogicalNewline)
|| !tok.is_non_logical_newline())
})
.tuple_windows()
{
let (a_range, b_range) = match (a_tok, b_tok) {
(TokenKind::String, TokenKind::String) => (a_range, b_range),
(TokenKind::String, TokenKind::FStringStart) => {
(Tok::String { .. }, Tok::String { .. }) => (*a_range, *b_range),
(Tok::String { .. }, Tok::FStringStart(_)) => {
match indexer.fstring_ranges().innermost(b_range.start()) {
Some(b_range) => (a_range, b_range),
Some(b_range) => (*a_range, b_range),
None => continue,
}
}
(TokenKind::FStringEnd, TokenKind::String) => {
(Tok::FStringEnd, Tok::String { .. }) => {
match indexer.fstring_ranges().innermost(a_range.start()) {
Some(a_range) => (a_range, b_range),
Some(a_range) => (a_range, *b_range),
None => continue,
}
}
(TokenKind::FStringEnd, TokenKind::FStringStart) => {
(Tok::FStringEnd, Tok::FStringStart(_)) => {
match (
indexer.fstring_ranges().innermost(a_range.start()),
indexer.fstring_ranges().innermost(b_range.start()),
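A sketch of the adjacency check in `implicit` above: after filtering out comments (and, when multiline concatenation is disallowed, non-logical newlines, which lets string parts on different lines pair up), adjacent token pairs are inspected. The real code pairs tokens with `itertools::tuple_windows`; `slice::windows` stands in for it here.

```rust
#[derive(Clone, PartialEq)]
enum Tok {
    String,
    Comment,
    NonLogicalNewline,
}

fn implicit_concat_pairs(tokens: &[Tok], allow_multiline: bool) -> usize {
    let filtered: Vec<Tok> = tokens
        .iter()
        .filter(|tok| {
            **tok != Tok::Comment
                && (allow_multiline || **tok != Tok::NonLogicalNewline)
        })
        .cloned()
        .collect();
    // Each adjacent string/string pair is an implicit concatenation.
    filtered
        .windows(2)
        .filter(|pair| pair[0] == Tok::String && pair[1] == Tok::String)
        .count()
}

fn main() {
    let toks = [
        Tok::String,
        Tok::Comment,
        Tok::String,
        Tok::NonLogicalNewline,
        Tok::String,
    ];
    // Keeping the newline separates the second pair of strings...
    assert_eq!(implicit_concat_pairs(&toks, true), 1);
    // ...while dropping it makes the cross-line pair adjacent too.
    assert_eq!(implicit_concat_pairs(&toks, false), 2);
}
```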
@@ -1,10 +1,10 @@
use ruff_python_ast::AnyStringFlags;
use ruff_python_ast::AnyStringKind;
use ruff_text_size::TextLen;

/// Returns the raw contents of the string given the string's contents and flags.
/// Returns the raw contents of the string given the string's contents and kind.
/// This is a string without the prefix and quotes.
pub(super) fn raw_contents(contents: &str, flags: AnyStringFlags) -> &str {
&contents[flags.opener_len().to_usize()..(contents.text_len() - flags.closer_len()).to_usize()]
pub(super) fn raw_contents(contents: &str, kind: AnyStringKind) -> &str {
&contents[kind.opener_len().to_usize()..(contents.text_len() - kind.closer_len()).to_usize()]
}

/// Return `true` if the haystack contains an escaped quote.
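An illustrative model of what `raw_contents` slices out: the opener length covers the prefix plus the opening quotes, the closer length covers the closing quotes. For `r"abc"` the opener is `r"` (2 bytes) and the closer `"` (1 byte). The helper below is a simplified stand-in, not the real signature.

```rust
fn raw_contents(contents: &str, opener_len: usize, closer_len: usize) -> &str {
    // Strip the prefix/opening quotes and the closing quotes.
    &contents[opener_len..contents.len() - closer_len]
}

fn main() {
    assert_eq!(raw_contents("r\"abc\"", 2, 1), "abc");
    assert_eq!(raw_contents("'''abc'''", 3, 3), "abc");
}
```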
@@ -1,7 +1,7 @@
use ruff_diagnostics::{AlwaysFixableViolation, Diagnostic, Edit, Fix};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::visitor::{walk_f_string, Visitor};
use ruff_python_ast::{self as ast, AnyStringFlags, StringLike};
use ruff_python_ast::{self as ast, AnyStringKind, StringLike};
use ruff_source_file::Locator;
use ruff_text_size::{Ranged, TextRange, TextSize};

@@ -110,7 +110,7 @@ impl Visitor<'_> for AvoidableEscapedQuoteChecker<'_> {
self.locator,
self.quotes_settings,
string_literal.range(),
AnyStringFlags::from(string_literal.flags),
AnyStringKind::from(string_literal.flags),
) {
self.diagnostics.push(diagnostic);
}
@@ -121,7 +121,7 @@ impl Visitor<'_> for AvoidableEscapedQuoteChecker<'_> {
self.locator,
self.quotes_settings,
bytes_literal.range(),
AnyStringFlags::from(bytes_literal.flags),
AnyStringKind::from(bytes_literal.flags),
) {
self.diagnostics.push(diagnostic);
}
@@ -192,7 +192,6 @@ impl Visitor<'_> for AvoidableEscapedQuoteChecker<'_> {
// f"'normal' {f'\'nested\' {x} "double quotes"'} normal"
// ```
if !f_string
.elements
.literals()
.any(|literal| contains_quote(literal, opposite_quote_char))
{
@@ -214,20 +213,20 @@ fn check_string_or_bytes(
locator: &Locator,
quotes_settings: &flake8_quotes::settings::Settings,
range: TextRange,
flags: AnyStringFlags,
kind: AnyStringKind,
) -> Option<Diagnostic> {
assert!(!flags.is_f_string());
assert!(!kind.is_f_string());

if flags.is_triple_quoted() || flags.is_raw_string() {
if kind.is_triple_quoted() || kind.is_raw_string() {
return None;
}

// Check if we're using the preferred quotation style.
if Quote::from(flags.quote_style()) != quotes_settings.inline_quotes {
if Quote::from(kind.quote_style()) != quotes_settings.inline_quotes {
return None;
}

let contents = raw_contents(locator.slice(range), flags);
let contents = raw_contents(locator.slice(range), kind);

if !contains_escaped_quote(contents, quotes_settings.inline_quotes.as_char())
|| contains_quote(contents, quotes_settings.inline_quotes.opposite().as_char())
@@ -238,7 +237,7 @@ fn check_string_or_bytes(
let mut diagnostic = Diagnostic::new(AvoidableEscapedQuote, range);
let fixed_contents = format!(
"{prefix}{quote}{value}{quote}",
prefix = flags.prefix(),
prefix = kind.prefix(),
quote = quotes_settings.inline_quotes.opposite().as_char(),
value = unescape_string(contents, quotes_settings.inline_quotes.as_char())
);
@@ -270,7 +269,7 @@ fn check_f_string(
let opposite_quote_char = quotes_settings.inline_quotes.opposite().as_char();

let mut edits = vec![];
for literal in f_string.elements.literals() {
for literal in f_string.literals() {
let content = locator.slice(literal);
if !contains_escaped_quote(content, quote_char) {
continue;

@@ -454,7 +454,10 @@ pub(crate) fn check_string_quotes(checker: &mut Checker, string_like: StringLike
return;
}

let ranges: Vec<_> = string_like.parts().map(|part| part.range()).collect();
let ranges = string_like
.parts()
.map(|part| part.range())
.collect::<Vec<_>>();

if checker.semantic().in_pep_257_docstring() {
if checker.enabled(Rule::BadQuotesDocstring) {

@@ -1,6 +1,6 @@
use ruff_diagnostics::{AlwaysFixableViolation, Diagnostic, Edit, Fix};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::{self as ast, AnyStringFlags, StringLike};
use ruff_python_ast::{self as ast, AnyStringKind, StringLike};
use ruff_source_file::Locator;
use ruff_text_size::{Ranged, TextRange};

@@ -59,7 +59,7 @@ pub(crate) fn unnecessary_escaped_quote(checker: &mut Checker, string_like: Stri
if let Some(diagnostic) = check_string_or_bytes(
locator,
string_literal.range(),
AnyStringFlags::from(string_literal.flags),
AnyStringKind::from(string_literal.flags),
) {
checker.diagnostics.push(diagnostic);
}
@@ -68,7 +68,7 @@ pub(crate) fn unnecessary_escaped_quote(checker: &mut Checker, string_like: Stri
if let Some(diagnostic) = check_string_or_bytes(
locator,
bytes_literal.range(),
AnyStringFlags::from(bytes_literal.flags),
AnyStringKind::from(bytes_literal.flags),
) {
checker.diagnostics.push(diagnostic);
}
@@ -90,16 +90,16 @@ pub(crate) fn unnecessary_escaped_quote(checker: &mut Checker, string_like: Stri
fn check_string_or_bytes(
locator: &Locator,
range: TextRange,
flags: AnyStringFlags,
kind: AnyStringKind,
) -> Option<Diagnostic> {
assert!(!flags.is_f_string());
assert!(!kind.is_f_string());

if flags.is_triple_quoted() || flags.is_raw_string() {
if kind.is_triple_quoted() || kind.is_raw_string() {
return None;
}

let contents = raw_contents(locator.slice(range), flags);
let quote = flags.quote_style();
let contents = raw_contents(locator.slice(range), kind);
let quote = kind.quote_style();
let opposite_quote_char = quote.opposite().as_char();

if !contains_escaped_quote(contents, opposite_quote_char) {
@@ -108,7 +108,7 @@ fn check_string_or_bytes(

let mut diagnostic = Diagnostic::new(UnnecessaryEscapedQuote, range);
diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
flags.format_string_contents(&unescape_string(contents, opposite_quote_char)),
kind.format_string_contents(&unescape_string(contents, opposite_quote_char)),
range,
)));
Some(diagnostic)
@@ -124,7 +124,7 @@ fn check_f_string(locator: &Locator, f_string: &ast::FString) -> Option<Diagnost
let opposite_quote_char = flags.quote_style().opposite().as_char();

let mut edits = vec![];
for literal in f_string.elements.literals() {
for literal in f_string.literals() {
let content = locator.slice(literal);
if !contains_escaped_quote(content, opposite_quote_char) {
continue;
@@ -1,6 +1,5 @@
use ruff_python_ast::{
self as ast, str_prefix::StringLiteralPrefix, Arguments, Expr, StringLiteralFlags,
};
use ast::{StringLiteralFlags, StringLiteralPrefix};
use ruff_python_ast::{self as ast, Arguments, Expr};
use ruff_text_size::Ranged;

use crate::fix::snippet::SourceCodeSnippet;

@@ -1,5 +1,4 @@
use anyhow::Result;
use std::cmp::Reverse;

use ruff_diagnostics::Edit;
use ruff_python_ast::helpers::{map_callable, map_subscript};
@@ -287,17 +286,11 @@ pub(crate) fn quote_annotation(

/// Filter out any [`Edit`]s that are completely contained by any other [`Edit`].
pub(crate) fn filter_contained(edits: Vec<Edit>) -> Vec<Edit> {
let mut edits = edits;

// Sort such that the largest edits are prioritized.
edits.sort_unstable_by_key(|edit| (edit.start(), Reverse(edit.end())));

// Remove any edits that are completely contained by another edit.
let mut filtered: Vec<Edit> = Vec::with_capacity(edits.len());
for edit in edits {
if !filtered
if filtered
.iter()
.any(|filtered_edit| filtered_edit.range().contains_range(edit.range()))
.all(|filtered_edit| !filtered_edit.range().contains_range(edit.range()))
{
filtered.push(edit);
}
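The `filter_contained` rewrite above swaps `!iter.any(p)` for `iter.all(|x| !p(x))`, which is equivalent by De Morgan's laws but states the keep-condition positively. A small property check on plain ranges, with an illustrative `contains` helper:

```rust
fn contains(outer: (u32, u32), inner: (u32, u32)) -> bool {
    outer.0 <= inner.0 && inner.1 <= outer.1
}

fn main() {
    let filtered = [(0u32, 10u32), (20, 30)];
    let candidate = (2, 5);
    // "No retained edit contains the candidate" written both ways:
    let via_any = !filtered.iter().any(|&e| contains(e, candidate));
    let via_all = filtered.iter().all(|&e| !contains(e, candidate));
    assert_eq!(via_any, via_all);
}
```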
@@ -296,8 +296,6 @@ quote.py:78:24: TCH002 [*] Move third-party import `pandas.DataFrame` into a typ
91 |- def func() -> DataFrame[[DataFrame[_P, _R]], DataFrame[_P, _R]]:
91 |+ def func() -> "DataFrame[[DataFrame[_P, _R]], DataFrame[_P, _R]]":
92 92 | ...
93 93 |
94 94 |

quote.py:78:35: TCH002 [*] Move third-party import `pandas.Series` into a type-checking block
|
@@ -339,61 +337,5 @@ quote.py:78:35: TCH002 [*] Move third-party import `pandas.Series` into a type-c
91 |- def func() -> DataFrame[[DataFrame[_P, _R]], DataFrame[_P, _R]]:
91 |+ def func() -> "DataFrame[[DataFrame[_P, _R]], DataFrame[_P, _R]]":
92 92 | ...
93 93 |
94 94 |

quote.py:96:24: TCH002 [*] Move third-party import `pandas.DataFrame` into a type-checking block
|
95 | def f():
96 | from pandas import DataFrame, Series
| ^^^^^^^^^ TCH002
97 |
98 | def func(self) -> DataFrame | list[Series]:
|
= help: Move into type-checking block

ℹ Unsafe fix
1 |+from typing import TYPE_CHECKING
2 |+
3 |+if TYPE_CHECKING:
4 |+ from pandas import DataFrame, Series
1 5 | def f():
2 6 | from pandas import DataFrame
3 7 |
--------------------------------------------------------------------------------
93 97 |
94 98 |
95 99 | def f():
96 |- from pandas import DataFrame, Series
97 100 |
98 |- def func(self) -> DataFrame | list[Series]:
101 |+ def func(self) -> "DataFrame | list[Series]":
99 102 | pass

quote.py:96:35: TCH002 [*] Move third-party import `pandas.Series` into a type-checking block
|
95 | def f():
96 | from pandas import DataFrame, Series
| ^^^^^^ TCH002
97 |
98 | def func(self) -> DataFrame | list[Series]:
|
= help: Move into type-checking block

ℹ Unsafe fix
1 |+from typing import TYPE_CHECKING
2 |+
3 |+if TYPE_CHECKING:
4 |+ from pandas import DataFrame, Series
1 5 | def f():
2 6 | from pandas import DataFrame
3 7 |
--------------------------------------------------------------------------------
93 97 |
94 98 |
95 99 | def f():
96 |- from pandas import DataFrame, Series
97 100 |
98 |- def func(self) -> DataFrame | list[Series]:
101 |+ def func(self) -> "DataFrame | list[Series]":
99 102 | pass
@@ -96,7 +96,7 @@ fn build_fstring(joiner: &str, joinees: &[Expr]) -> Option<Expr> {
}

let node = ast::FString {
elements: f_string_elements.into(),
elements: f_string_elements,
range: TextRange::default(),
flags: FStringFlags::default(),
};
@@ -12,7 +12,10 @@ use ruff_diagnostics::Fix;
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::PySourceType;
use ruff_python_codegen::Stylist;
use ruff_python_parser::{TokenKind, TokenKindIter};
use ruff_python_parser::lexer::LexResult;
use ruff_python_parser::lexer::LexicalError;
use ruff_python_parser::Tok;
use ruff_python_parser::TokenKind;
use ruff_source_file::{Locator, UniversalNewlines};
use ruff_text_size::TextRange;
use ruff_text_size::TextSize;
@@ -381,7 +384,7 @@ struct LogicalLineInfo {
/// Iterator that processes tokens until a full logical line (or comment line) is "built".
/// It then returns characteristics of that logical line (see `LogicalLineInfo`).
struct LinePreprocessor<'a> {
tokens: TokenKindIter<'a>,
tokens: Iter<'a, Result<(Tok, TextRange), LexicalError>>,
locator: &'a Locator<'a>,
indent_width: IndentWidth,
/// The start position of the next logical line.
@@ -397,13 +400,13 @@ struct LinePreprocessor<'a> {

impl<'a> LinePreprocessor<'a> {
fn new(
tokens: TokenKindIter<'a>,
tokens: &'a [LexResult],
locator: &'a Locator,
indent_width: IndentWidth,
cell_offsets: Option<&'a CellOffsets>,
) -> LinePreprocessor<'a> {
LinePreprocessor {
tokens,
tokens: tokens.iter(),
locator,
line_start: TextSize::new(0),
max_preceding_blank_lines: BlankLines::Zero,
@@ -427,11 +430,17 @@ impl<'a> Iterator for LinePreprocessor<'a> {
let mut last_token: TokenKind = TokenKind::EndOfFile;
let mut parens = 0u32;

while let Some((token, range)) = self.tokens.next() {
if matches!(token, TokenKind::Indent | TokenKind::Dedent) {
while let Some(result) = self.tokens.next() {
let Ok((token, range)) = result else {
continue;
};

if matches!(token, Tok::Indent | Tok::Dedent) {
continue;
}

let token_kind = TokenKind::from_token(token);

let (logical_line_kind, first_token_range) = if let Some(first_token_range) =
first_logical_line_token
{
@@ -453,23 +462,25 @@ impl<'a> Iterator for LinePreprocessor<'a> {
}

// An empty line
if token == TokenKind::NonLogicalNewline {
blank_lines.add(range);
if token_kind == TokenKind::NonLogicalNewline {
blank_lines.add(*range);

self.line_start = range.end();

continue;
}

is_docstring = token == TokenKind::String;
is_docstring = token_kind == TokenKind::String;

let logical_line_kind = match token {
let logical_line_kind = match token_kind {
TokenKind::Class => LogicalLineKind::Class,
TokenKind::Comment => LogicalLineKind::Comment,
TokenKind::At => LogicalLineKind::Decorator,
TokenKind::Def => LogicalLineKind::Function,
// Lookahead to distinguish `async def` from `async with`.
TokenKind::Async if matches!(self.tokens.peek(), Some((TokenKind::Def, _))) => {
TokenKind::Async
if matches!(self.tokens.as_slice().first(), Some(Ok((Tok::Def, _)))) =>
{
LogicalLineKind::Function
}
TokenKind::Import => LogicalLineKind::Import,
@@ -477,22 +488,22 @@ impl<'a> Iterator for LinePreprocessor<'a> {
_ => LogicalLineKind::Other,
};

first_logical_line_token = Some((logical_line_kind, range));
first_logical_line_token = Some((logical_line_kind, *range));

(logical_line_kind, range)
(logical_line_kind, *range)
};

if !token.is_trivia() {
if !token_kind.is_trivia() {
line_is_comment_only = false;
}

// A docstring line is composed only of the docstring (TokenKind::String) and trivia tokens.
// (If a comment follows a docstring, we still count the line as a docstring)
if token != TokenKind::String && !token.is_trivia() {
if token_kind != TokenKind::String && !token_kind.is_trivia() {
is_docstring = false;
}

match token {
match token_kind {
TokenKind::Lbrace | TokenKind::Lpar | TokenKind::Lsqb => {
parens = parens.saturating_add(1);
}
@@ -538,8 +549,8 @@ impl<'a> Iterator for LinePreprocessor<'a> {
_ => {}
}

if !token.is_trivia() {
last_token = token;
if !token_kind.is_trivia() {
last_token = token_kind;
}
}
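A sketch of the lookahead swap in `LinePreprocessor` above: a `Peekable` wrapper is no longer needed because `slice::Iter::as_slice` exposes the unconsumed remainder, so peeking becomes `iter.as_slice().first()` without advancing the iterator.

```rust
fn main() {
    let tokens = ["async", "def", "f"];
    let mut iter = tokens.iter();

    assert_eq!(iter.next(), Some(&"async"));
    // Non-consuming lookahead at the next token:
    assert_eq!(iter.as_slice().first(), Some(&"def"));
    // The iterator itself has not advanced.
    assert_eq!(iter.next(), Some(&"def"));
}
```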
@@ -722,7 +733,7 @@ impl<'a> BlankLinesChecker<'a> {
}

/// E301, E302, E303, E304, E305, E306
pub(crate) fn check_lines(&self, tokens: TokenKindIter<'a>, diagnostics: &mut Vec<Diagnostic>) {
pub(crate) fn check_lines(&self, tokens: &[LexResult], diagnostics: &mut Vec<Diagnostic>) {
let mut prev_indent_length: Option<usize> = None;
let mut state = BlankLinesState::default();
let line_preprocessor =
@@ -1,6 +1,7 @@
use ruff_notebook::CellOffsets;
use ruff_python_ast::PySourceType;
use ruff_python_parser::{TokenKind, TokenKindIter};
use ruff_python_parser::lexer::LexResult;
use ruff_python_parser::Tok;
use ruff_text_size::{TextRange, TextSize};

use ruff_diagnostics::{AlwaysFixableViolation, Violation};
@@ -99,7 +100,7 @@ impl AlwaysFixableViolation for UselessSemicolon {
/// E701, E702, E703
pub(crate) fn compound_statements(
diagnostics: &mut Vec<Diagnostic>,
mut tokens: TokenKindIter,
lxr: &[LexResult],
locator: &Locator,
indexer: &Indexer,
source_type: PySourceType,
@@ -133,36 +134,39 @@ pub(crate) fn compound_statements(
// Track indentation.
let mut indent = 0u32;

while let Some((token, range)) = tokens.next() {
match token {
TokenKind::Lpar => {
// Keep the token iterator to perform lookaheads.
let mut tokens = lxr.iter().flatten();

while let Some(&(ref tok, range)) = tokens.next() {
match tok {
Tok::Lpar => {
par_count = par_count.saturating_add(1);
}
TokenKind::Rpar => {
Tok::Rpar => {
par_count = par_count.saturating_sub(1);
}
TokenKind::Lsqb => {
Tok::Lsqb => {
sqb_count = sqb_count.saturating_add(1);
}
TokenKind::Rsqb => {
Tok::Rsqb => {
sqb_count = sqb_count.saturating_sub(1);
}
TokenKind::Lbrace => {
Tok::Lbrace => {
brace_count = brace_count.saturating_add(1);
}
TokenKind::Rbrace => {
Tok::Rbrace => {
brace_count = brace_count.saturating_sub(1);
}
TokenKind::Ellipsis => {
Tok::Ellipsis => {
if allow_ellipsis {
allow_ellipsis = false;
continue;
}
}
TokenKind::Indent => {
Tok::Indent => {
indent = indent.saturating_add(1);
}
TokenKind::Dedent => {
Tok::Dedent => {
indent = indent.saturating_sub(1);
}
_ => {}
@@ -172,8 +176,8 @@ pub(crate) fn compound_statements(
continue;
}

match token {
TokenKind::Newline => {
match tok {
Tok::Newline => {
if let Some((start, end)) = semi {
if !(source_type.is_ipynb()
&& indent == 0
@@ -211,7 +215,7 @@ pub(crate) fn compound_statements(
while_ = None;
with = None;
}
TokenKind::Colon => {
Tok::Colon => {
if case.is_some()
|| class.is_some()
|| elif.is_some()
@@ -231,14 +235,11 @@ pub(crate) fn compound_statements(
allow_ellipsis = true;
}
}
TokenKind::Semi => {
Tok::Semi => {
semi = Some((range.start(), range.end()));
allow_ellipsis = false;
}
TokenKind::Comment
| TokenKind::Indent
| TokenKind::Dedent
| TokenKind::NonLogicalNewline => {}
Tok::Comment(..) | Tok::Indent | Tok::Dedent | Tok::NonLogicalNewline => {}
_ => {
if let Some((start, end)) = semi {
diagnostics.push(Diagnostic::new(
@@ -276,8 +277,8 @@ pub(crate) fn compound_statements(
}
}

match token {
TokenKind::Lambda => {
match tok {
Tok::Lambda => {
// Reset.
colon = None;
case = None;
@@ -293,40 +294,40 @@ pub(crate) fn compound_statements(
while_ = None;
with = None;
}
TokenKind::Case => {
Tok::Case => {
case = Some((range.start(), range.end()));
}
TokenKind::If => {
Tok::If => {
if_ = Some((range.start(), range.end()));
}
TokenKind::While => {
Tok::While => {
while_ = Some((range.start(), range.end()));
}
TokenKind::For => {
Tok::For => {
for_ = Some((range.start(), range.end()));
}
TokenKind::Try => {
Tok::Try => {
try_ = Some((range.start(), range.end()));
}
TokenKind::Except => {
Tok::Except => {
except = Some((range.start(), range.end()));
}
TokenKind::Finally => {
Tok::Finally => {
finally = Some((range.start(), range.end()));
}
TokenKind::Elif => {
Tok::Elif => {
elif = Some((range.start(), range.end()));
}
TokenKind::Else => {
Tok::Else => {
else_ = Some((range.start(), range.end()));
}
TokenKind::Class => {
Tok::Class => {
class = Some((range.start(), range.end()));
}
TokenKind::With => {
Tok::With => {
with = Some((range.start(), range.end()));
}
TokenKind::Match => {
Tok::Match => {
match_ = Some((range.start(), range.end()));
}
_ => {}
@@ -336,17 +337,17 @@ pub(crate) fn compound_statements(

/// Returns `true` if there are any non-trivia tokens from the given token
/// iterator till the given end offset.
fn has_non_trivia_tokens_till(tokens: TokenKindIter, cell_end: TextSize) -> bool {
for (token, tok_range) in tokens {
fn has_non_trivia_tokens_till<'a>(
tokens: impl Iterator<Item = &'a (Tok, TextRange)>,
cell_end: TextSize,
) -> bool {
for &(ref tok, tok_range) in tokens {
if tok_range.start() >= cell_end {
return false;
}
if !matches!(
token,
TokenKind::Newline
| TokenKind::Comment
| TokenKind::EndOfFile
| TokenKind::NonLogicalNewline
tok,
Tok::Newline | Tok::Comment(_) | Tok::EndOfFile | Tok::NonLogicalNewline
) {
return true;
}
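A sketch of the depth bookkeeping `compound_statements` relies on: saturating counters track bracket nesting so that colons and semicolons are only inspected at the top level. Simplified here to parentheses and `char` tokens.

```rust
fn top_level_semicolons(toks: &[char]) -> usize {
    let mut par_count = 0u32;
    let mut found = 0;
    for &tok in toks {
        match tok {
            '(' => par_count = par_count.saturating_add(1),
            ')' => par_count = par_count.saturating_sub(1),
            // Only count separators outside all brackets.
            ';' if par_count == 0 => found += 1,
            _ => {}
        }
    }
    found
}

fn main() {
    // The first `;` sits inside parentheses and is skipped.
    let toks = ['f', '(', 'a', ';', 'b', ')', ';', 'x', ';'];
    assert_eq!(top_level_semicolons(&toks), 2);
}
```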
@@ -2,11 +2,11 @@ use memchr::memchr_iter;

use ruff_diagnostics::{AlwaysFixableViolation, Diagnostic, Edit, Fix};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::{AnyStringFlags, FStringElement, StringLike, StringLikePart};
use ruff_python_index::Indexer;
use ruff_python_parser::Tok;
use ruff_source_file::Locator;
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};

use crate::checkers::ast::Checker;
use crate::fix::edits::pad_start;

/// ## What it does
@@ -59,83 +59,38 @@ impl AlwaysFixableViolation for InvalidEscapeSequence {
}

/// W605
pub(crate) fn invalid_escape_sequence(checker: &mut Checker, string_like: StringLike) {
let locator = checker.locator();

for part in string_like.parts() {
if part.flags().is_raw_string() {
continue;
}
match part {
StringLikePart::String(string_literal) => {
check(
&mut checker.diagnostics,
locator,
string_literal.start(),
string_literal.range(),
AnyStringFlags::from(string_literal.flags),
);
}
StringLikePart::Bytes(bytes_literal) => {
check(
&mut checker.diagnostics,
locator,
bytes_literal.start(),
bytes_literal.range(),
AnyStringFlags::from(bytes_literal.flags),
);
}
StringLikePart::FString(f_string) => {
let flags = AnyStringFlags::from(f_string.flags);
for element in &f_string.elements {
match element {
FStringElement::Literal(literal) => {
check(
&mut checker.diagnostics,
locator,
f_string.start(),
literal.range(),
flags,
);
}
FStringElement::Expression(expression) => {
let Some(format_spec) = expression.format_spec.as_ref() else {
continue;
};
for literal in format_spec.elements.literals() {
check(
&mut checker.diagnostics,
locator,
f_string.start(),
literal.range(),
flags,
);
}
}
}
}
}
}
}
}

fn check(
pub(crate) fn invalid_escape_sequence(
diagnostics: &mut Vec<Diagnostic>,
locator: &Locator,
// Start position of the expression that contains the source range. This is used to generate
// the fix when the source range is part of the expression like in f-string which contains
// other f-string literal elements.
expr_start: TextSize,
// Range in the source code to perform the check on.
source_range: TextRange,
flags: AnyStringFlags,
indexer: &Indexer,
token: &Tok,
token_range: TextRange,
) {
let source = locator.slice(source_range);
let (token_source_code, string_start_location, kind) = match token {
Tok::FStringMiddle { kind, .. } => {
if kind.is_raw_string() {
return;
}
let Some(f_string_range) = indexer.fstring_ranges().innermost(token_range.start())
else {
return;
};
(locator.slice(token_range), f_string_range.start(), kind)
}
Tok::String { kind, .. } => {
if kind.is_raw_string() {
return;
}
(locator.slice(token_range), token_range.start(), kind)
}
_ => return,
};

let mut contains_valid_escape_sequence = false;
let mut invalid_escape_chars = Vec::new();

let mut prev = None;
let bytes = source.as_bytes();
let bytes = token_source_code.as_bytes();
for i in memchr_iter(b'\\', bytes) {
// If the previous character was also a backslash, skip.
if prev.is_some_and(|prev| prev == i - 1) {
@@ -145,9 +100,9 @@ fn check(

prev = Some(i);

let next_char = match source[i + 1..].chars().next() {
let next_char = match token_source_code[i + 1..].chars().next() {
Some(next_char) => next_char,
None if flags.is_f_string() => {
None if token.is_f_string_middle() => {
// If we're at the end of a f-string middle token, the next character
// is actually emitted as a different token. For example,
//
@@ -169,7 +124,7 @@ fn check(
// f-string. This means that if there's a `FStringMiddle` token and we
// encounter a `\` character, then the next character is always going to
// be part of the f-string.
if let Some(next_char) = locator.after(source_range.end()).chars().next() {
if let Some(next_char) = locator.after(token_range.end()).chars().next() {
next_char
} else {
continue;
@@ -217,7 +172,7 @@ fn check(
continue;
}

let location = source_range.start() + TextSize::try_from(i).unwrap();
let location = token_range.start() + TextSize::try_from(i).unwrap();
let range = TextRange::at(location, next_char.text_len() + TextSize::from(1));
invalid_escape_chars.push(InvalidEscapeChar {
ch: next_char,
@@ -225,6 +180,7 @@ fn check(
});
}

let mut invalid_escape_sequence = Vec::new();
if contains_valid_escape_sequence {
// Escape with backslash.
for invalid_escape_char in &invalid_escape_chars {
@@ -239,7 +195,7 @@ fn check(
r"\".to_string(),
invalid_escape_char.start() + TextSize::from(1),
)));
diagnostics.push(diagnostic);
invalid_escape_sequence.push(diagnostic);
}
} else {
// Turn into raw string.
@@ -252,12 +208,12 @@ fn check(
invalid_escape_char.range(),
);

if flags.is_u_string() {
if kind.is_u_string() {
// Replace the Unicode prefix with `r`.
diagnostic.set_fix(Fix::safe_edit(Edit::replacement(
"r".to_string(),
expr_start,
expr_start + TextSize::from(1),
string_start_location,
string_start_location + TextSize::from(1),
)));
} else {
// Insert the `r` prefix.
@@ -266,15 +222,17 @@ fn check(
// `assert`, etc.) and the string. For example, `return"foo"` is valid, but
// `returnr"foo"` is not.
Fix::safe_edit(Edit::insertion(
pad_start("r".to_string(), expr_start, locator),
expr_start,
pad_start("r".to_string(), string_start_location, locator),
string_start_location,
)),
);
}

diagnostics.push(diagnostic);
invalid_escape_sequence.push(diagnostic);
}
}

diagnostics.extend(invalid_escape_sequence);
}

#[derive(Debug, PartialEq, Eq)]
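A sketch of the escape scan at the core of W605 above: walk every backslash in the token's source text and classify the character that follows. The real code locates backslashes with `memchr::memchr_iter`; a plain byte scan and a hypothetical valid-escape set stand in here.

```rust
fn invalid_escapes(source: &str) -> Vec<(usize, char)> {
    let mut prev = None;
    let mut invalid = Vec::new();
    for (i, _) in source.bytes().enumerate().filter(|&(_, b)| b == b'\\') {
        // A backslash escaped by the previous backslash does not start an escape.
        if prev.is_some_and(|p| p == i - 1) {
            prev = None;
            continue;
        }
        prev = Some(i);
        if let Some(next_char) = source[i + 1..].chars().next() {
            // `\n`, `\t`, `\\`, ... are valid; anything else is suspect.
            if !matches!(next_char, 'n' | 't' | 'r' | '\\' | '\'' | '"' | '0') {
                invalid.push((i, next_char));
            }
        }
    }
    invalid
}

fn main() {
    // `\d` is an invalid escape in a non-raw Python string; `\n` is fine.
    assert_eq!(invalid_escapes(r"\d+\n"), vec![(0, 'd')]);
}
```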
@@ -52,7 +52,7 @@ pub(crate) fn missing_whitespace_after_keyword(
let tok0_kind = tok0.kind();
let tok1_kind = tok1.kind();

if tok0_kind.is_non_soft_keyword()
if tok0_kind.is_keyword()
&& !(tok0_kind.is_singleton()
|| matches!(tok0_kind, TokenKind::Async | TokenKind::Await)
|| tok0_kind == TokenKind::Except && tok1_kind == TokenKind::Star

@@ -198,7 +198,9 @@ pub(crate) fn missing_whitespace_around_operator(
matches!(
prev_kind,
TokenKind::Rpar | TokenKind::Rsqb | TokenKind::Rbrace
) || !(prev_kind.is_operator() || prev_kind.is_keyword())
) || !(prev_kind.is_operator()
|| prev_kind.is_keyword()
|| prev_kind.is_soft_keyword())
};

if is_binary {

@@ -445,7 +445,7 @@ impl LogicalLinesBuilder {

if matches!(kind, TokenKind::Comma | TokenKind::Semi | TokenKind::Colon) {
line.flags.insert(TokenFlags::PUNCTUATION);
} else if kind.is_non_soft_keyword() {
} else if kind.is_keyword() {
line.flags.insert(TokenFlags::KEYWORD);
}

@@ -127,8 +127,8 @@ pub(crate) fn whitespace_around_keywords(line: &LogicalLine, context: &mut Logic
let mut after_keyword = false;

for token in line.tokens() {
let is_non_soft_keyword = token.kind().is_non_soft_keyword();
if is_non_soft_keyword {
let is_keyword = token.kind().is_keyword();
if is_keyword {
if !after_keyword {
match line.leading_whitespace(token) {
(Whitespace::Tab, offset) => {
@@ -184,6 +184,6 @@ pub(crate) fn whitespace_around_keywords(line: &LogicalLine, context: &mut Logic
}
}

after_keyword = is_non_soft_keyword;
after_keyword = is_keyword;
}
}
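A sketch of the keyword-classification split these hunks rely on: hard keywords (`def`, `class`, ...) are always keywords, while soft keywords (`match`, `case`, `type`) only act as keywords positionally, so the operator check names them explicitly. The helper names below are illustrative; the real predicates live on `TokenKind`.

```rust
fn is_keyword(word: &str) -> bool {
    // Hard keywords: reserved in every position.
    matches!(word, "def" | "class" | "if" | "else" | "for" | "while")
}

fn is_soft_keyword(word: &str) -> bool {
    // Soft keywords: valid identifiers outside their special positions.
    matches!(word, "match" | "case" | "type")
}

fn main() {
    assert!(is_keyword("def") && !is_soft_keyword("def"));
    assert!(!is_keyword("match") && is_soft_keyword("match"));
}
```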
@@ -1,6 +1,7 @@
use ruff_diagnostics::{AlwaysFixableViolation, Diagnostic, Edit, Fix};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_parser::{TokenKind, TokenKindIter};
use ruff_python_parser::lexer::LexResult;
use ruff_python_parser::Tok;
use ruff_text_size::{TextRange, TextSize};

/// ## What it does
@@ -56,23 +57,23 @@ impl AlwaysFixableViolation for TooManyNewlinesAtEndOfFile {
/// W391
pub(crate) fn too_many_newlines_at_end_of_file(
diagnostics: &mut Vec<Diagnostic>,
tokens: TokenKindIter,
lxr: &[LexResult],
) {
let mut num_trailing_newlines = 0u32;
let mut start: Option<TextSize> = None;
let mut end: Option<TextSize> = None;

// Count the number of trailing newlines.
for (token, range) in tokens.rev() {
match token {
TokenKind::NonLogicalNewline | TokenKind::Newline => {
for (tok, range) in lxr.iter().rev().flatten() {
match tok {
Tok::NonLogicalNewline | Tok::Newline => {
if num_trailing_newlines == 0 {
end = Some(range.end());
}
start = Some(range.end());
num_trailing_newlines += 1;
}
TokenKind::Dedent => continue,
Tok::Dedent => continue,
_ => {
break;
}
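A minimal sketch of the W391 scan above: iterate the lex results from the end, count trailing newlines, skip the dedents that close out every file, and stop at the first other token. Types are simplified stand-ins.

```rust
enum Tok {
    Newline,
    Dedent,
    Name,
}

type LexResult = Result<(Tok, u32), ()>;

fn trailing_newlines(lxr: &[LexResult]) -> u32 {
    let mut num = 0u32;
    // Reverse scan over the successfully lexed tokens.
    for (tok, _range) in lxr.iter().rev().flatten() {
        match tok {
            Tok::Newline => num += 1,
            Tok::Dedent => continue,
            _ => break,
        }
    }
    num
}

fn main() {
    let lxr: Vec<LexResult> = vec![
        Ok((Tok::Name, 0)),
        Ok((Tok::Newline, 1)),
        Ok((Tok::Dedent, 2)),
        Ok((Tok::Newline, 3)),
    ];
    assert_eq!(trailing_newlines(&lxr), 2);
}
```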
@@ -11,6 +11,7 @@ mod tests {

use anyhow::Result;
use regex::Regex;
use ruff_python_parser::lexer::LexResult;

use test_case::test_case;

@@ -207,11 +208,7 @@ mod tests {
#[test_case(Rule::UnusedVariable, Path::new("F841_4.py"))]
#[test_case(Rule::UnusedImport, Path::new("__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_24/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_25__all_nonempty/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_26__all_empty/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_27__all_mistyped/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_28__all_multiple/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_29__all_conditional/__init__.py"))]
#[test_case(Rule::UnusedImport, Path::new("F401_25__all/__init__.py"))]
fn preview_rules(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!(
"preview__{}_{}",
@@ -594,7 +591,7 @@ mod tests {
let source_type = PySourceType::default();
let source_kind = SourceKind::Python(contents.to_string());
let settings = LinterSettings::for_rules(Linter::Pyflakes.rules());
let tokens = ruff_python_parser::tokenize(&contents, source_type.as_mode());
let tokens: Vec<LexResult> = ruff_python_parser::tokenize(&contents, source_type.as_mode());
let locator = Locator::new(&contents);
let stylist = Stylist::from_tokens(&tokens, &locator);
let indexer = Indexer::from_tokens(&tokens, &locator);
@@ -6,11 +6,8 @@ use rustc_hash::FxHashMap;
|
||||
|
||||
use ruff_diagnostics::{Applicability, Diagnostic, Fix, FixAvailability, Violation};
|
||||
use ruff_macros::{derive_message_formats, violation};
|
||||
use ruff_python_ast as ast;
|
||||
use ruff_python_ast::{Stmt, StmtImportFrom};
|
||||
use ruff_python_semantic::{
|
||||
AnyImport, BindingKind, Exceptions, Imported, NodeId, Scope, SemanticModel,
|
||||
};
|
||||
use ruff_python_semantic::{AnyImport, Exceptions, Imported, NodeId, Scope};
|
||||
use ruff_text_size::{Ranged, TextRange};
|
||||
|
||||
use crate::checkers::ast::Checker;
|
||||
@@ -21,10 +18,7 @@ use crate::rules::{isort, isort::ImportSection, isort::ImportType};
|
||||
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
|
||||
enum UnusedImportContext {
|
||||
ExceptHandler,
|
||||
Init {
|
||||
first_party: bool,
|
||||
dunder_all_count: usize,
|
||||
},
|
||||
Init { first_party: bool },
|
||||
}
|
||||
|
||||
/// ## What it does
@@ -85,10 +79,7 @@ enum UnusedImportContext {
/// - [Typing documentation: interface conventions](https://typing.readthedocs.io/en/latest/source/libraries.html#library-interface-public-and-private-symbols)
#[violation]
pub struct UnusedImport {
    /// Qualified name of the import
    name: String,
    /// Name of the import binding
    binding: String,
    context: Option<UnusedImportContext>,
    multiple: bool,
}
@@ -115,31 +106,16 @@ impl Violation for UnusedImport {
    }

    fn fix_title(&self) -> Option<String> {
        let UnusedImport {
            name,
            binding,
            multiple,
            ..
        } = self;
        match self.context {
            Some(UnusedImportContext::Init {
                first_party: true,
                dunder_all_count: 1,
            }) => Some(format!("Add unused import `{binding}` to __all__")),

            Some(UnusedImportContext::Init {
                first_party: true,
                dunder_all_count: 0,
            }) => Some(format!(
                "Use an explicit re-export: `{binding} as {binding}`"
            )),

            _ => Some(if *multiple {
                "Remove unused import".to_string()
            } else {
                format!("Remove unused import: `{name}`")
            }),
        }
        let UnusedImport { name, multiple, .. } = self;
        let resolution = match self.context {
            Some(UnusedImportContext::Init { first_party: true }) => "Use a redundant alias",
            _ => "Remove unused import",
        };
        Some(if *multiple {
            resolution.to_string()
        } else {
            format!("{resolution}: `{name}`")
        })
    }
}

@@ -162,32 +138,9 @@ fn is_first_party(qualified_name: &str, level: u32, checker: &Checker) -> bool {
    }
}

/// Find the `Expr` for top level `__all__` bindings.
fn find_dunder_all_exprs<'a>(semantic: &'a SemanticModel) -> Vec<&'a ast::Expr> {
    semantic
        .global_scope()
        .get_all("__all__")
        .filter_map(|binding_id| {
            let binding = semantic.binding(binding_id);
            let stmt = match binding.kind {
                BindingKind::Export(_) => binding.statement(semantic),
                _ => None,
            }?;
            match stmt {
                Stmt::Assign(ast::StmtAssign { value, .. }) => Some(&**value),
                Stmt::AnnAssign(ast::StmtAnnAssign { value, .. }) => value.as_deref(),
                Stmt::AugAssign(ast::StmtAugAssign { value, .. }) => Some(&**value),
                _ => None,
            }
        })
        .collect()
}

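Editor's note, not part of the diff: the three statement forms matched above correspond to these top-level `__all__` bindings in Python source — a minimal illustration with hypothetical names:

```python
__all__ = ["public"]             # Stmt::Assign
__all__: list[str] = ["public"]  # Stmt::AnnAssign
__all__ += ["also_public"]       # Stmt::AugAssign
```
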
/// For some unused binding in an import statement...
///
/// __init__.py ∧ 1stpty → safe,   if one __all__,      add to __all__
///                        safe,   if no __all__,       convert to redundant-alias
///                        n/a,    if multiple __all__, offer no fix
/// __init__.py ∧ 1stpty → safe,   convert to redundant-alias
/// __init__.py ∧ stdlib → unsafe, remove
/// __init__.py ∧ 3rdpty → unsafe, remove
///
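To make the decision table concrete, here is an illustrative first-party `pkg/__init__.py` (file and names are hypothetical) and the fix each row would produce:

```python
# pkg/__init__.py
from . import unused  # first-party and unused

# exactly one __all__  -> safe fix: append, i.e. __all__ = ["unused"]
# no __all__ at all    -> safe fix: from . import unused as unused
# multiple __all__     -> no fix is offered
__all__ = []
```
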
@@ -220,7 +173,6 @@ pub(crate) fn unused_import(checker: &Checker, scope: &Scope, diagnostics: &mut
        };

        let import = ImportBinding {
            name: binding.name(checker.locator()),
            import,
            range: binding.range(),
            parent_range: binding.parent_range(checker.semantic()),
@@ -245,7 +197,6 @@ pub(crate) fn unused_import(checker: &Checker, scope: &Scope, diagnostics: &mut

    let in_init = checker.path().ends_with("__init__.py");
    let fix_init = checker.settings.preview.is_enabled();
    let dunder_all_exprs = find_dunder_all_exprs(checker.semantic());

    // Generate a diagnostic for every import, but share fixes across all imports within the same
    // statement (excluding those that are ignored).
@@ -274,7 +225,6 @@ pub(crate) fn unused_import(checker: &Checker, scope: &Scope, diagnostics: &mut
                        level,
                        checker,
                    ),
                    dunder_all_count: dunder_all_exprs.len(),
                })
            } else {
                None
@@ -284,10 +234,7 @@ pub(crate) fn unused_import(checker: &Checker, scope: &Scope, diagnostics: &mut
        .partition(|(_, context)| {
            matches!(
                context,
                Some(UnusedImportContext::Init {
                    first_party: true,
                    ..
                })
                Some(UnusedImportContext::Init { first_party: true })
            )
        });

@@ -304,8 +251,7 @@ pub(crate) fn unused_import(checker: &Checker, scope: &Scope, diagnostics: &mut
                fix_by_reexporting(
                    checker,
                    import_statement,
                    &to_reexport.iter().map(|(b, _)| b).collect::<Vec<_>>(),
                    &dunder_all_exprs,
                    to_reexport.iter().map(|(binding, _)| binding),
                )
                .ok(),
            )
@@ -320,7 +266,6 @@ pub(crate) fn unused_import(checker: &Checker, scope: &Scope, diagnostics: &mut
        let mut diagnostic = Diagnostic::new(
            UnusedImport {
                name: binding.import.qualified_name().to_string(),
                binding: binding.name.to_string(),
                context,
                multiple,
            },
@@ -340,17 +285,21 @@ pub(crate) fn unused_import(checker: &Checker, scope: &Scope, diagnostics: &mut

    // Separately, generate a diagnostic for every _ignored_ import, to ensure that the
    // suppression comments aren't marked as unused.
    for binding in ignored.into_values().flatten() {
    for ImportBinding {
        import,
        range,
        parent_range,
    } in ignored.into_values().flatten()
    {
        let mut diagnostic = Diagnostic::new(
            UnusedImport {
                name: binding.import.qualified_name().to_string(),
                binding: binding.name.to_string(),
                name: import.qualified_name().to_string(),
                context: None,
                multiple: false,
            },
            binding.range,
            range,
        );
        if let Some(range) = binding.parent_range {
        if let Some(range) = parent_range {
            diagnostic.set_parent(range.start());
        }
        diagnostics.push(diagnostic);
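
A short illustration of the comment above (hypothetical module): the internally generated diagnostic marks the suppression comment as used, so the comment itself is not flagged as an unused `noqa` (e.g. by RUF100):

```python
import os  # noqa: F401
# An (ignored) F401 diagnostic is still produced for `os`, which keeps
# this suppression comment from being reported as unused.
```
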
@@ -360,8 +309,6 @@ pub(crate) fn unused_import(checker: &Checker, scope: &Scope, diagnostics: &mut
/// An unused import with its surrounding context.
#[derive(Debug)]
struct ImportBinding<'a> {
    /// Name of the binding, which for renamed imports will differ from the qualified name.
    name: &'a str,
    /// The qualified name of the import (e.g., `typing.List` for `from typing import List`).
    import: AnyImport<'a, 'a>,
    /// The trimmed range of the import (e.g., `List` in `from typing import List`).
@@ -417,31 +364,23 @@ fn fix_by_removing_imports<'a>(
    )
}

/// Generate a [`Fix`] to make bindings in a statement explicit, either by adding them to `__all__`
/// or changing them from `import a` to `import a as a`.
fn fix_by_reexporting(
/// Generate a [`Fix`] to make bindings in a statement explicit, by changing from `import a` to
/// `import a as a`.
fn fix_by_reexporting<'a>(
    checker: &Checker,
    node_id: NodeId,
    imports: &[&ImportBinding],
    dunder_all_exprs: &[&ast::Expr],
    imports: impl Iterator<Item = &'a ImportBinding<'a>>,
) -> Result<Fix> {
    let statement = checker.semantic().statement(node_id);
    if imports.is_empty() {

    let member_names = imports
        .map(|binding| binding.import.member_name())
        .collect::<Vec<_>>();
    if member_names.is_empty() {
        bail!("Expected import bindings");
    }

    let edits = match dunder_all_exprs {
        [] => fix::edits::make_redundant_alias(
            imports.iter().map(|b| b.import.member_name()),
            statement,
        ),
        [dunder_all] => fix::edits::add_to_dunder_all(
            imports.iter().map(|b| b.name),
            dunder_all,
            checker.stylist(),
        ),
        _ => bail!("Cannot offer a fix when there are multiple __all__ definitions"),
    };
    let edits = fix::edits::make_redundant_alias(member_names.iter().map(AsRef::as_ref), statement);

    // Only emit a fix if there are edits
    let mut tail = edits.into_iter();

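On Python source, the two edit shapes built here (on the removed, `__all__`-aware side of the hunk) look as follows; the after-lines mirror the snapshots further down:

```python
# No __all__ in the module: make_redundant_alias rewrites the binding.
from . import unused            # before
from . import unused as unused  # after

# Exactly one __all__: add_to_dunder_all appends the binding's name instead.
__all__ = ["argparse", "exported"]            # before
__all__ = ["argparse", "exported", "unused"]  # after
```
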
@@ -22,7 +22,7 @@ __init__.py:33:15: F401 [*] `.unused` imported but unused; consider removing, ad
33 | from . import unused  # F401: change to redundant alias
   |               ^^^^^^ F401
   |
= help: Use an explicit re-export: `unused as unused`
= help: Use a redundant alias: `.unused`

ℹ Safe fix
30 30 | from . import aliased as aliased  # Ok: is redundant alias
@@ -39,4 +39,4 @@ __init__.py:36:26: F401 `.renamed` imported but unused; consider removing, addin
36 | from . import renamed as bees  # F401: no fix
   |                          ^^^^ F401
   |
= help: Use an explicit re-export: `bees as bees`
= help: Use a redundant alias: `.renamed`

@@ -0,0 +1,18 @@
---
source: crates/ruff_linter/src/rules/pyflakes/mod.rs
---
__init__.py:19:8: F401 [*] `sys` imported but unused; consider removing, adding to `__all__`, or using a redundant alias
   |
19 | import sys  # F401: remove unused
   |        ^^^ F401
   |
= help: Remove unused import: `sys`

ℹ Unsafe fix
16 16 | import argparse  # Ok: is exported in __all__
17 17 | 
18 18 | 
19    |-import sys  # F401: remove unused
20 19 | 
21 20 | 
22 21 | # first-party
@@ -1,46 +0,0 @@
---
source: crates/ruff_linter/src/rules/pyflakes/mod.rs
---
__init__.py:19:8: F401 [*] `sys` imported but unused; consider removing, adding to `__all__`, or using a redundant alias
   |
19 | import sys  # F401: remove unused
   |        ^^^ F401
   |
= help: Remove unused import: `sys`

ℹ Unsafe fix
16 16 | import argparse  # Ok: is exported in __all__
17 17 | 
18 18 | 
19    |-import sys  # F401: remove unused
20 19 | 
21 20 | 
22 21 | # first-party

__init__.py:36:15: F401 [*] `.unused` imported but unused; consider removing, adding to `__all__`, or using a redundant alias
   |
36 | from . import unused  # F401: add to __all__
   |               ^^^^^^ F401
   |
= help: Add unused import `unused` to __all__

ℹ Safe fix
39 39 | from . import renamed as bees  # F401: add to __all__
40 40 | 
41 41 | 
42    |-__all__ = ["argparse", "exported"]
   42 |+__all__ = ["argparse", "exported", "unused"]

__init__.py:39:26: F401 [*] `.renamed` imported but unused; consider removing, adding to `__all__`, or using a redundant alias
   |
39 | from . import renamed as bees  # F401: add to __all__
   |                          ^^^^ F401
   |
= help: Add unused import `bees` to __all__

ℹ Safe fix
39 39 | from . import renamed as bees  # F401: add to __all__
40 40 | 
41 41 | 
42    |-__all__ = ["argparse", "exported"]
   42 |+__all__ = ["argparse", "exported", "bees"]
@@ -1,30 +0,0 @@
---
source: crates/ruff_linter/src/rules/pyflakes/mod.rs
---
__init__.py:5:15: F401 [*] `.unused` imported but unused; consider removing, adding to `__all__`, or using a redundant alias
  |
5 | from . import unused  # F401: add to __all__
  |               ^^^^^^ F401
  |
= help: Add unused import `unused` to __all__

ℹ Safe fix
 8  8 | from . import renamed as bees  # F401: add to __all__
 9  9 | 
10 10 | 
11    |-__all__ = []
   11 |+__all__ = ["unused"]

__init__.py:8:26: F401 [*] `.renamed` imported but unused; consider removing, adding to `__all__`, or using a redundant alias
  |
8 | from . import renamed as bees  # F401: add to __all__
  |                          ^^^^ F401
  |
= help: Add unused import `bees` to __all__

ℹ Safe fix
 8  8 | from . import renamed as bees  # F401: add to __all__
 9  9 | 
10 10 | 
11    |-__all__ = []
   11 |+__all__ = ["bees"]
@@ -1,16 +0,0 @@
---
source: crates/ruff_linter/src/rules/pyflakes/mod.rs
---
__init__.py:5:15: F401 `.unused` imported but unused; consider removing, adding to `__all__`, or using a redundant alias
  |
5 | from . import unused  # F401: recommend add to all w/o fix
  |               ^^^^^^ F401
  |
= help: Add unused import `unused` to __all__

__init__.py:8:26: F401 `.renamed` imported but unused; consider removing, adding to `__all__`, or using a redundant alias
  |
8 | from . import renamed as bees  # F401: recommend add to all w/o fix
  |                          ^^^^ F401
  |
= help: Add unused import `bees` to __all__
@@ -1,30 +0,0 @@
---
source: crates/ruff_linter/src/rules/pyflakes/mod.rs
---
__init__.py:5:15: F401 [*] `.unused` imported but unused; consider removing, adding to `__all__`, or using a redundant alias
  |
5 | from . import unused, renamed as bees  # F401: add to __all__
  |               ^^^^^^ F401
  |
= help: Add unused import `unused` to __all__

ℹ Safe fix
5 5 | from . import unused, renamed as bees  # F401: add to __all__
6 6 | 
7 7 | 
8   |-__all__ = [];
  8 |+__all__ = ["bees", "unused"];

__init__.py:5:34: F401 [*] `.renamed` imported but unused; consider removing, adding to `__all__`, or using a redundant alias
  |
5 | from . import unused, renamed as bees  # F401: add to __all__
  |                                  ^^^^ F401
  |
= help: Add unused import `bees` to __all__

ℹ Safe fix
5 5 | from . import unused, renamed as bees  # F401: add to __all__
6 6 | 
7 7 | 
8   |-__all__ = [];
  8 |+__all__ = ["bees", "unused"];
@@ -1,24 +0,0 @@
---
source: crates/ruff_linter/src/rules/pyflakes/mod.rs
---
__init__.py:8:15: F401 `.unused` imported but unused; consider removing, adding to `__all__`, or using a redundant alias
   |
 6 | import sys
 7 |
 8 | from . import unused, exported, renamed as bees
   |               ^^^^^^ F401
 9 |
10 | if sys.version_info > (3, 9):
   |
= help: Remove unused import

__init__.py:8:44: F401 `.renamed` imported but unused; consider removing, adding to `__all__`, or using a redundant alias
   |
 6 | import sys
 7 |
 8 | from . import unused, exported, renamed as bees
   |                                            ^^^^ F401
 9 |
10 | if sys.version_info > (3, 9):
   |
= help: Remove unused import
@@ -2,13 +2,14 @@ use std::str::FromStr;

use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::{AnyStringFlags, Expr, ExprStringLiteral};
use ruff_python_ast::{AnyStringKind, Expr};
use ruff_python_literal::{
    cformat::{CFormatErrorType, CFormatString},
    format::FormatPart,
    format::FromTemplate,
    format::{FormatSpec, FormatSpecError, FormatString},
};
use ruff_python_parser::{lexer, Mode, Tok};
use ruff_text_size::{Ranged, TextRange};

use crate::checkers::ast::Checker;
@@ -89,12 +90,29 @@ pub(crate) fn call(checker: &mut Checker, string: &str, range: TextRange) {

/// PLE1300
/// Ex) `"%z" % "1"`
pub(crate) fn percent(checker: &mut Checker, expr: &Expr, format_string: &ExprStringLiteral) {
    for string_literal in &format_string.value {
        let string = checker.locator().slice(string_literal);
        let flags = AnyStringFlags::from(string_literal.flags);
pub(crate) fn percent(checker: &mut Checker, expr: &Expr) {
    // Grab each string segment (in case there's an implicit concatenation).
    let mut strings: Vec<(TextRange, AnyStringKind)> = vec![];
    for (tok, range) in
        lexer::lex_starts_at(checker.locator().slice(expr), Mode::Module, expr.start()).flatten()
    {
        match tok {
            Tok::String { kind, .. } => strings.push((range, kind)),
            // Break as soon as we find the modulo symbol.
            Tok::Percent => break,
            _ => {}
        }
    }

    // If there are no string segments, abort.
    if strings.is_empty() {
        return;
    }

    for (range, kind) in &strings {
        let string = checker.locator().slice(*range);
        let string = &string
            [usize::from(flags.opener_len())..(string.len() - usize::from(flags.closer_len()))];
            [usize::from(kind.opener_len())..(string.len() - usize::from(kind.closer_len()))];

        // Parse the format string (e.g. `"%s"`) into a list of `PercentFormat`.
        if let Err(format_error) = CFormatString::from_str(string) {

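For orientation, the kind of Python this loop is defending against — the token walk collects every string segment before the `%`, so an error in any part of an implicitly concatenated literal is still caught (examples are illustrative):

```python
"%z" % "1"          # PLE1300: `%z` is not a supported format character
"%s" "%z" % ("a",)  # implicit concatenation: the bad `%z` sits in the
                    # second segment, but both are collected and parsed
```
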
@@ -1,8 +1,9 @@
use std::str::FromStr;

use ruff_python_ast::{self as ast, AnyStringFlags, Expr};
use ruff_python_ast::{self as ast, AnyStringKind, Expr};
use ruff_python_literal::cformat::{CFormatPart, CFormatSpec, CFormatStrOrBytes, CFormatString};
use ruff_text_size::Ranged;
use ruff_python_parser::{lexer, AsMode, Tok};
use ruff_text_size::{Ranged, TextRange};
use rustc_hash::FxHashMap;

use ruff_diagnostics::{Diagnostic, Violation};
@@ -210,16 +211,30 @@ fn is_valid_dict(formats: &[CFormatStrOrBytes<String>], items: &[ast::DictItem])
}

/// PLE1307
pub(crate) fn bad_string_format_type(
    checker: &mut Checker,
    bin_op: &ast::ExprBinOp,
    format_string: &ast::ExprStringLiteral,
) {
pub(crate) fn bad_string_format_type(checker: &mut Checker, expr: &Expr, right: &Expr) {
    // Grab each string segment (in case there's an implicit concatenation).
    let content = checker.locator().slice(expr);
    let mut strings: Vec<(TextRange, AnyStringKind)> = vec![];
    for (tok, range) in
        lexer::lex_starts_at(content, checker.source_type.as_mode(), expr.start()).flatten()
    {
        match tok {
            Tok::String { kind, .. } => strings.push((range, kind)),
            // Break as soon as we find the modulo symbol.
            Tok::Percent => break,
            _ => {}
        }
    }

    // If there are no string segments, abort.
    if strings.is_empty() {
        return;
    }

    // Parse each string segment.
    let mut format_strings = vec![];
    for string_literal in &format_string.value {
        let string = checker.locator().slice(string_literal);
        let flags = AnyStringFlags::from(string_literal.flags);
    for (range, flags) in &strings {
        let string = checker.locator().slice(*range);
        let quote_len = usize::from(flags.quote_len());
        let string =
            &string[(usize::from(flags.prefix_len()) + quote_len)..(string.len() - quote_len)];
@@ -231,14 +246,14 @@ pub(crate) fn bad_string_format_type(
    }

    // Parse the parameters.
    let is_valid = match &*bin_op.right {
    let is_valid = match right {
        Expr::Tuple(ast::ExprTuple { elts, .. }) => is_valid_tuple(&format_strings, elts),
        Expr::Dict(ast::ExprDict { items, range: _ }) => is_valid_dict(&format_strings, items),
        _ => is_valid_constant(&format_strings, &bin_op.right),
        _ => is_valid_constant(&format_strings, right),
    };
    if !is_valid {
        checker
            .diagnostics
            .push(Diagnostic::new(BadStringFormatType, bin_op.range()));
            .push(Diagnostic::new(BadStringFormatType, expr.range()));
    }
}

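Finally, an illustrative sketch of what the PLE1307 check evaluates once the segments are parsed (values are made up):

```python
"%d" % "1"                # PLE1307: str given where the format expects an int
"%(num)d" % {"num": "1"}  # dict parameters are checked via is_valid_dict
"%d" % 1                  # OK: is_valid_constant accepts the int
```
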
Some files were not shown because too many files have changed in this diff.