Compare commits

..

133 Commits

Author SHA1 Message Date
Charlie Marsh
8cb76f85eb Bump version to 0.0.264 (#4179) 2023-05-01 23:33:38 -07:00
Charlie Marsh
56c45013c2 Allow boolean parameters for pytest.param (#4176) 2023-05-02 01:07:50 +00:00
Calum Young
a4ce746892 Reference related settings in rules (#4157) 2023-05-02 00:59:00 +00:00
Calum Young
2d6d51f3a1 Add flake8-return docs (#4164) 2023-05-02 00:53:46 +00:00
Jonathan Plasse
814731364a Fix UP032 auto-fix (#4165) 2023-04-30 16:57:41 -04:00
Jonathan Plasse
8c97e7922b Fix F811 false positive with match (#4161) 2023-04-30 14:39:45 -04:00
Jonathan Plasse
a32617911a Use --filter=blob:none to clone CPython faster (#4156) 2023-04-30 13:39:22 +02:00
Charlie Marsh
64b7280eb8 Respect parent-scoping rules for NamedExpr assignments (#4145) 2023-04-29 22:45:30 +00:00
Evan Rittenhouse
8d64747d34 Remove pyright comment prefix from PYI033 checks (#4152) 2023-04-29 18:41:04 -04:00
Charlie Marsh
2115d99c43 Remove ScopeStack in favor of child-parent ScopeId pointers (#4138) 2023-04-29 18:23:51 -04:00
Calum Young
39ed75f643 Document flake8-unused-arguments (#4147) 2023-04-29 19:17:50 +00:00
Calum Young
8f61eae1e7 Add remaining pep8-naming docs (#4149) 2023-04-29 15:13:10 -04:00
Calum Young
f0f4bf2929 Move typos to pre-commit config (#4148) 2023-04-29 12:13:35 -04:00
Calum Young
03144b2fad Document flake8-commas (#4142) 2023-04-29 03:24:15 +00:00
Calum Young
0172cc51a7 Document flake8-print (#4144) 2023-04-29 03:19:00 +00:00
Calum Young
12d64a223b Document RUF100 (#4141) 2023-04-28 22:14:15 +00:00
Charlie Marsh
432ea6f2e2 Tweak rule documentation for B008 (#4137) 2023-04-28 01:29:03 +00:00
Evan Rittenhouse
b34804ceb5 Make D410/D411 autofixes mutually exclusive (#4110) 2023-04-28 01:24:35 +00:00
Moritz Sauter
ee6d8f7467 Add bugbear immutable functions as allowed in dataclasses (#4122) 2023-04-27 21:23:06 -04:00
Dhruv Manilawala
089b64e9c1 Autofix EM101, EM102, EM103 if possible (#4123) 2023-04-27 18:53:27 +00:00
Tom Kuson
3e81403fbe Add pygrep-hooks documentation (#4131) 2023-04-27 18:33:07 +00:00
Charlie Marsh
3c9f5e2fdc Preserve star-handling special-casing for force-single-line (#4129) 2023-04-27 00:02:17 -04:00
Micha Reiser
17db2e2a62 Fix B023 shadowed variables in nested functions (#4111) 2023-04-26 22:01:31 +01:00
Micha Reiser
e04ef42334 Use memchr to speedup newline search on x86 (#3985) 2023-04-26 20:15:47 +01:00
Micha Reiser
f3e6ddda62 perf(logical-lines): Various small perf improvements (#4022) 2023-04-26 20:10:35 +01:00
Micha Reiser
cab65b25da Replace row/column based Location with byte-offsets. (#3931) 2023-04-26 18:11:02 +00:00
Charlie Marsh
ee91598835 Tweak --show-fixes documentation (#4117) 2023-04-26 15:15:56 +00:00
Calum Young
ab65eaea7f Add docs build validation stage to CI (#4116)
Nice. Thank you
2023-04-26 14:57:59 +01:00
konstin
19d8913e32 Use musl in ecosystem docker (#3998)
This prevents errors when the host glibc is newer than the one in the docker container
2023-04-26 05:54:53 +02:00
Dhruv Manilawala
b9c06b48e1 Document that --diff implies --fix-only (#4098) 2023-04-25 21:19:44 -06:00
Charlie Marsh
7266eb0d69 Add support for providing command-line arguments via argfile (#4087) 2023-04-25 17:58:21 -06:00
Jonathan Plasse
4df7bc0bcd Fix E713 and E714 false positives for multiple comparisons (#4083) 2023-04-25 11:37:56 -06:00
Calum Young
464a0ff483 Fix docs failure (#4097) 2023-04-25 11:30:37 -06:00
Charlie Marsh
fd7ccb4c9e Bump version to 0.0.263 (#4086) 2023-04-24 23:32:29 -06:00
Evan Rittenhouse
ae6f38344a Unify positional and keyword arguments when checking for missing arguments in docstring (#4067) 2023-04-25 05:32:15 +00:00
Trevor McCulloch
bbf658d4c5 [pylint] Implement PLE0302 unexpected-special-method-signature (#4075) 2023-04-25 04:51:21 +00:00
Jonathan Plasse
1f3b0fd602 Fix SIM222 and SIM223 false positives and auto-fix (#4063) 2023-04-25 04:44:02 +00:00
Dhruv Manilawala
37483f3ac9 Ignore ClassVar annotation for RUF008, RUF009 (#4081) 2023-04-24 23:58:30 +00:00
Zanie Adkins
4d3a1e0581 Add PrefectHQ/prefect to list of ruff users (#4084) 2023-04-24 17:49:12 -06:00
Bartosz Sokorski
9e5f348a17 Add Poetry to the list of projects using Ruff (#4085) 2023-04-24 17:48:35 -06:00
Jonathan Plasse
5e91211e6d Add in_boolean_test to Context (#4072) 2023-04-23 23:18:23 -06:00
Jonathan Plasse
df77595426 Move Truthiness into ruff_python_ast (#4071) 2023-04-24 04:54:31 +00:00
Charlie Marsh
407af6e0ae Avoid infinite-propagation of inline comments when force-splitting imports (#4074) 2023-04-23 22:39:51 -06:00
Dhruv Manilawala
d64146683e Increment priority should be (branch-local, global) (#4070) 2023-04-23 00:04:15 -06:00
Charlie Marsh
0e7914010f Misc. small clean-up of flake8-import-conventions rules (#4069) 2023-04-23 04:57:15 +00:00
Edgar R. M
cfc7d8a2b5 [flake8-import-conventions] Implement new rule ICN003 to ban from ... import ... for selected modules (#4040) 2023-04-23 04:40:36 +00:00
Tom Kuson
f5cd659292 Add docs for tryceratops rules (#4042) 2023-04-23 04:35:56 +00:00
Charlie Marsh
260138b427 Use Context for pep8-naming helpers (#4068) 2023-04-22 18:44:54 -04:00
Jonathan Plasse
2da149fd7e Ignore N815 for TypedDict fields (#4066) 2023-04-22 18:17:14 -04:00
Micha Reiser
e33887718d Use Rust 1.69 (#4065) 2023-04-22 23:04:17 +01:00
Micha Reiser
ba4f4f4672 Upgrade dependencies (#4064) 2023-04-22 18:04:01 +01:00
Pronoy Mandal
b7a57ce120 Update tutorial.md (#4055) 2023-04-21 10:56:31 -06:00
Alan Du
82abbc7234 [flake8-bugbear] Add pytest.raises(Exception) support to B017 (#4052) 2023-04-21 03:43:01 +00:00
Dhruv Manilawala
ba98149022 Avoid RUF008 if field annotation is immutable (#4039) 2023-04-20 16:02:12 -04:00
Dhruv Manilawala
7fd44a3e12 Avoid PYI015 for valid default value without annotation (#4043) 2023-04-20 15:45:47 -04:00
Evan Rittenhouse
6e8d561090 Support --fix in watch mode (#4035) 2023-04-19 23:33:12 -04:00
Jacob Coffee
cb762f4cad Add Astral announcement to README (#4010) 2023-04-19 20:28:45 +00:00
Charlie Marsh
eed6866b7e Add relative-path tests for banned-api (#4033) 2023-04-19 16:04:22 -04:00
Charlie Marsh
25a6bfa9ee Bump version to 0.0.262 (#4032) 2023-04-19 15:49:28 -04:00
Charlie Marsh
b3f8f2a5c1 Remove TODO in handle_node_store (#4031) 2023-04-19 15:28:56 -04:00
Charlie Marsh
cc8b5a543b Ignore stub file assignments to value-requiring targets (#4030) 2023-04-19 15:26:00 -04:00
Charlie Marsh
10d5415bcb Ignore certain flake8-pyi errors within function bodies (#4029) 2023-04-19 15:10:29 -04:00
Charlie Marsh
827cbe7f97 Treat non-future function annotations as required-at-runtime (#4028) 2023-04-19 14:43:55 -04:00
Charlie Marsh
0d84517fbc Use module path resolver for relative autofix (#4027) 2023-04-19 14:43:45 -04:00
Charlie Marsh
7fa1da20fb Support relative imports in banned-api enforcement (#4025) 2023-04-19 14:30:13 -04:00
Francesco Nuzzo
f13a161ead remove unnecessary f-string formatting (#4026) 2023-04-19 18:14:33 +00:00
Charlie Marsh
c4cda301aa Ignore relative imports in banned-api rules (#4024) 2023-04-19 13:30:08 -04:00
Charlie Marsh
13fda30051 Refactor flake8_tidy_imports rules to consistently take Checker (#4023) 2023-04-19 16:42:15 +00:00
Micha Reiser
a3146ab1ca Fix (doc-)line-too-long start location (#4006) 2023-04-19 08:42:28 +02:00
Micha Reiser
c0cf87356e Set non-empty range for indentation diagnostics (#4005) 2023-04-18 16:26:13 +02:00
Andrei Grazhdankov
6c3e4ef441 Add Robyn to user list (#4008) 2023-04-18 09:51:20 -04:00
Charlie Marsh
6c038830a8 Ignore argument assignments when enforcing RET504 (#4004) 2023-04-18 03:22:38 +00:00
Charlie Marsh
064a293b80 Fix defaults for section-order (#4003) 2023-04-18 03:00:17 +00:00
Charlie Marsh
79c47e29ee Avoid short-circuiting when detecting RET rules (#4002) 2023-04-17 22:52:26 -04:00
Charlie Marsh
be87a29a9d Respect typing-modules when evaluating no-return functions (#4001) 2023-04-17 20:25:44 +00:00
Micha Reiser
280dffb5e1 Add parser benchmark (#3990) 2023-04-17 16:43:59 +02:00
Charlie Marsh
336993ea06 Change Alpha trove classifier to Beta (#3995) 2023-04-17 13:55:49 +00:00
Tom Kuson
516cb10000 Add more documentation for flake8-type-checking (#3994) 2023-04-17 09:51:54 -04:00
Charlie Marsh
1cdd5e3424 Remove autofix behavior for uncapitalized-environment-variables (SIM112) (#3988) 2023-04-16 23:19:05 +00:00
Dhruv Manilawala
bd78c6ade2 Preserve type annotations when fixing E731 (#3983) 2023-04-16 23:15:38 +00:00
Dhruv Manilawala
5ce35faa86 Do not consider nested comment as part of code (#3984) 2023-04-16 19:11:01 -04:00
Justin Chu
484b572e6b Add ONNX Runtime to user list (#3982) 2023-04-16 18:21:46 -04:00
Charlie Marsh
81805a45f0 Add some additional users (#3975) 2023-04-14 12:41:22 -04:00
Charlie Marsh
c457752f36 Redirect PIE802 to C419 (#3971) 2023-04-13 22:12:32 -04:00
Charlie Marsh
289289bfd3 Implement unnecessary-literal-within-dict-call (C418) (#3969) 2023-04-14 01:39:35 +00:00
Charlie Marsh
09274307e8 Add multi-edit change to BREAKING_CHANGES.md (#3968) 2023-04-13 23:12:00 +00:00
Charlie Marsh
d8718dcf54 Remove extraneous debug and TODO (#3967) 2023-04-13 18:45:18 -04:00
Charlie Marsh
fb9eeba422 Move user-defined section validation into Settings (#3966) 2023-04-13 22:40:05 +00:00
Paul
2d2630ef07 Implement isort custom sections and ordering (#2419) (#3900) 2023-04-13 21:28:22 +00:00
Charlie Marsh
1f22e035e3 Add 'or if cond' to E712 message (#3962) 2023-04-13 19:02:23 +00:00
Rob Young
a6a7584d79 Implement flake8-bandit shell injection rules (#3924) 2023-04-13 14:45:27 -04:00
Charlie Marsh
ffac4f6ec3 Ignore assert errors (S101) in TYPE_CHECKING blocks (#3960) 2023-04-13 18:20:44 +00:00
Dhruv Manilawala
032a84b167 Check for parenthesis in implicit str concat in PT006 (#3955) 2023-04-13 17:56:18 +00:00
Charlie Marsh
3357aaef4b Add docs for assert rule (S101) (#3959) 2023-04-13 13:43:00 -04:00
Charlie Marsh
d9ed43d112 Clarify some isort differences in FAQ (#3954) 2023-04-13 04:05:28 +00:00
Charlie Marsh
e160a52bfd Raise percent-format upgrade rule (UP031) for hanging modulos (#3953) 2023-04-12 23:59:20 -04:00
Charlie Marsh
9067ae47d1 Allow typing_extensions.TypeVar assignments in .pyi files (#3951) 2023-04-12 17:30:15 -04:00
Charlie Marsh
71e807b3be Add Prefect to user list (#3949) 2023-04-12 12:09:36 -04:00
Charlie Marsh
1e2df07544 Use identifier range for pytest rules (#3948) 2023-04-12 15:28:25 +00:00
USER-5
860841468c [flake8-pyi] Implement duplicate types in unions (PYI016) (#3922) 2023-04-12 04:06:09 +00:00
Charlie Marsh
ed4ecc3255 Remove unused import (#3944) 2023-04-12 03:55:38 +00:00
Charlie Marsh
b999e4b1e2 Allow users to extend the set of included files via include (#3914) 2023-04-11 23:39:43 -04:00
Charlie Marsh
8ce227047d Tidy up some pygrep-hooks rules (#3942) 2023-04-12 03:35:15 +00:00
Daniel Stancl
523515f936 [flake8-import-conventions] Add a rule for BannedImportAlias (#3926) 2023-04-12 03:29:24 +00:00
Charlie Marsh
10da3bc8dd Support pyright: ignore comments (#3941) 2023-04-12 03:10:29 +00:00
Charlie Marsh
eb0dd74040 Avoid adding required imports to stub files (#3940) 2023-04-11 22:31:20 -04:00
Micha Reiser
61200d2171 lint snapshots: Use filename only to avoid platform specific separators (#3930) 2023-04-11 11:40:51 +02:00
Micha Reiser
e8aebee3f6 Pretty print Diagnostics in snapshot tests (#3906) 2023-04-11 09:03:00 +00:00
Micha Reiser
210083bdd8 Order Edits by Locations (#3905) 2023-04-11 08:56:41 +00:00
Micha Reiser
c33c9dc585 Introduce SourceFile to avoid cloning the message filename (#3904) 2023-04-11 08:28:55 +00:00
Micha Reiser
056c212975 Render code frame with context (#3901) 2023-04-11 10:22:11 +02:00
Micha Reiser
381203c084 Store source code on message (#3897) 2023-04-11 07:57:36 +00:00
Micha Reiser
76c47a9a43 Cheap cloneable LineIndex (#3896) 2023-04-11 07:33:40 +00:00
Micha Reiser
9209e57c5a Extract message emitters from Printer (#3895) 2023-04-11 07:24:25 +00:00
Leiser Fernández Gallo
333f1bd9ce Extend SIM105 to match also 'Ellipsis only' bodies in exception handlers (#3925) 2023-04-10 09:55:02 -04:00
Leiser Fernández Gallo
002caadf9e [flake8-simplify] Add autofix for contextlib.suppress (SIM105) (#3915) 2023-04-09 22:45:19 +00:00
Dhruv Manilawala
311ba29d0f Do not skip analysis if *args present for F523 (#3923) 2023-04-09 18:34:52 -04:00
Dhruv Manilawala
237a64d922 Check for arguments in inner/outer call for C414 (#3916) 2023-04-09 18:33:11 -04:00
Moritz Sauter
d4af2dd5cf [ruff] Add checks for mutable defaults in dataclasses (#3877) 2023-04-09 02:46:28 +00:00
Charlie Marsh
a36ce585ce Remove extract_path_names helper (#3920) 2023-04-08 11:14:42 -04:00
Charlie Marsh
29ec6df24f Avoid N802 violations for @override methods (#3912) 2023-04-08 03:11:50 +00:00
Evan Rittenhouse
8b17508ef1 Remove old documentation (#3911) 2023-04-07 22:51:19 -04:00
Evan Rittenhouse
abaf0a198d Ensure that tab characters aren't in multi-line strings before throwing a violation (#3837) 2023-04-06 22:25:40 -04:00
konstin
454c6d9c2f Extended ecosystem check with scraped data (#3858) 2023-04-06 22:39:48 +00:00
konstin
cae5503e34 [pylint] Fix unicode handling in PLE2515 (#3898) 2023-04-06 13:54:52 -04:00
Dhruv Manilawala
34e9786a41 Visit comprehension to detect group name usage/overrides (#3887) 2023-04-05 18:03:11 -04:00
Dhruv Manilawala
5467d45dfa Ignore PLW2901 when using typing cast (#3891) 2023-04-05 18:02:32 -04:00
Charlie Marsh
ac87137c1c Avoid printing docs on cargo dev generate-all (#3890) 2023-04-05 14:18:33 -04:00
Charlie Marsh
e0bccfd2d9 Allow legacy C and T selectors in JSON schema (#3889) 2023-04-05 17:58:36 +00:00
Tom Kuson
7b6e55a2e0 Add documentation for flake8-type-checking (#3886) 2023-04-05 17:30:25 +00:00
brucearctor
5c374b5793 Consistent Style/Levels in Usage (#3884) 2023-04-05 03:06:43 +00:00
Edgar R. M
ffdd0de522 Add Meltano to users (#3883) 2023-04-04 23:05:53 -04:00
Charlie Marsh
5370968839 Add some additional users and alphabetize (#3882) 2023-04-05 02:40:02 +00:00
1485 changed files with 75531 additions and 76564 deletions


@@ -12,3 +12,6 @@ indent_size = 2
[*.{rs,py}]
indent_size = 4
[*.snap]
trim_trailing_whitespace = false


@@ -121,15 +121,6 @@ jobs:
      - run: cargo check
      - run: cargo fmt --all --check
  typos:
    name: "spell check"
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: crate-ci/typos@master
        with:
          files: .
  ecosystem:
    name: "ecosystem"
    runs-on: ubuntu-latest
@@ -230,3 +221,21 @@ jobs:
          exit_code=${PIPESTATUS[0]}
          echo '```' >> $GITHUB_STEP_SUMMARY
          exit $exit_code
  docs:
    name: "mkdocs"
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
      - name: "Install Rust toolchain"
        run: rustup show
      - uses: Swatinem/rust-cache@v2
      - name: "Install dependencies"
        run: pip install -r docs/requirements.txt
      - name: "Update README File"
        run: python scripts/transform_readme.py --target mkdocs
      - name: "Generate docs"
        run: python scripts/generate_mkdocs.py
      - name: "Build docs"
        run: mkdocs build --strict

.gitignore (vendored): 1 change

@@ -3,6 +3,7 @@
crates/ruff/resources/test/cpython
mkdocs.yml
.overrides
github_search.jsonl
###
# Rust.gitignore


@@ -23,6 +23,11 @@ repos:
          - MD033 # no-inline-html
          - --
  - repo: https://github.com/crate-ci/typos
    rev: v1.14.8
    hooks:
      - id: typos
  - repo: local
    hooks:
      - id: cargo-fmt


@@ -1,5 +1,40 @@
# Breaking Changes

## 0.0.260

### Fixes are now represented as a list of edits ([#3709](https://github.com/charliermarsh/ruff/pull/3709))

Previously, Ruff represented each fix as a single edit, which prohibited Ruff from automatically
fixing violations that required multiple edits across a file. As such, Ruff now represents each
fix as a list of edits.

This primarily affects the JSON API. Ruff's JSON representation used to represent the `fix` field as
a single edit, like so:
```json
{
  "message": "Remove unused import: `sys`",
  "content": "",
  "location": {"row": 1, "column": 0},
  "end_location": {"row": 2, "column": 0}
}
```
The updated representation instead includes a list of edits:

```json
{
  "message": "Remove unused import: `sys`",
  "edits": [
    {
      "content": "",
      "location": {"row": 1, "column": 0},
      "end_location": {"row": 2, "column": 0}
    }
  ]
}
```
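For downstream consumers of the JSON output, the change amounts to iterating over `edits` rather than reading a single edit. A minimal sketch of a consumer in Rust, assuming `serde` (with the `derive` feature) and `serde_json` as dependencies; the struct names below are illustrative mirrors of the JSON shape, not Ruff's own types:

```rust
use serde::Deserialize;

// Illustrative mirror of the JSON shape above; not Ruff's internal types.
#[derive(Deserialize)]
struct Position {
    row: u32,
    column: u32,
}

#[derive(Deserialize)]
struct Edit {
    content: String,
    location: Position,
    end_location: Position,
}

#[derive(Deserialize)]
struct Fix {
    message: String,
    // Post-0.0.260: a list of edits, not a single edit.
    edits: Vec<Edit>,
}

fn main() -> serde_json::Result<()> {
    let raw = r#"{
        "message": "Remove unused import: `sys`",
        "edits": [
            {"content": "", "location": {"row": 1, "column": 0}, "end_location": {"row": 2, "column": 0}}
        ]
    }"#;
    let fix: Fix = serde_json::from_str(raw)?;
    for edit in &fix.edits {
        println!(
            "{}: replace {}:{}..{}:{} with {:?}",
            fix.message,
            edit.location.row, edit.location.column,
            edit.end_location.row, edit.end_location.column,
            edit.content,
        );
    }
    Ok(())
}
```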
## 0.0.246

### `multiple-statements-on-one-line-def` (`E704`) was removed ([#2773](https://github.com/charliermarsh/ruff/pull/2773))


@@ -116,8 +116,7 @@ At a high level, the steps involved in adding a new lint rule are as follows:
To define the violation, start by creating a dedicated file for your rule under the appropriate
rule linter (e.g., `crates/ruff/src/rules/flake8_bugbear/rules/abstract_base_class.rs`). That file should
contain a struct defined via `#[violation]`, along with a function that creates the violation
based on any required inputs. (Many of the existing examples live in `crates/ruff/src/violations.rs`,
but we're looking to place new rules in their own files.)
based on any required inputs.
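For orientation, a violation definition looks roughly like the sketch below, before it is wired into the checker as described next. This assumes the `#[violation]` macro from `ruff_macros` and the `Violation` trait from `ruff_diagnostics`; `ExampleRule` and its message are invented for illustration, and the exact trait surface may vary between releases:

```rust
use ruff_diagnostics::Violation;
use ruff_macros::{derive_message_formats, violation};

// A made-up rule, for illustration only.
#[violation]
pub struct ExampleRule {
    pub name: String,
}

impl Violation for ExampleRule {
    #[derive_message_formats]
    fn message(&self) -> String {
        let ExampleRule { name } = self;
        format!("`{name}` is flagged by the example rule")
    }
}
```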
To trigger the violation, you'll likely want to augment the logic in `crates/ruff/src/checkers/ast.rs`,
which defines the Python AST visitor, responsible for iterating over the abstract syntax tree and
@@ -215,6 +214,20 @@ them to [PyPI](https://pypi.org/project/ruff/).
Ruff follows the [semver](https://semver.org/) versioning standard. However, as pre-1.0 software,
even patch releases may contain [non-backwards-compatible changes](https://semver.org/#spec-item-4).
## Ecosystem CI
GitHub Actions will run your changes against a number of real-world projects from GitHub and
report on any diagnostic differences. You can also run those checks locally via:
```shell
python scripts/check_ecosystem.py path/to/your/ruff path/to/older/ruff
```
You can also run the Ecosystem CI check in a Docker container across a larger set of projects by
downloading the [`known-github-tomls.json`](https://github.com/akx/ruff-usage-aggregate/blob/master/data/known-github-tomls.jsonl)
as `github_search.jsonl` and following the instructions in [scripts/Dockerfile.ecosystem](https://github.com/charliermarsh/ruff/blob/main/scripts/Dockerfile.ecosystem).
Note that this check will take a while to run.
## Benchmarks
First, clone [CPython](https://github.com/python/cpython). It's a large and diverse Python codebase,

Cargo.lock (generated): 914 changes; file diff suppressed because it is too large


@@ -3,7 +3,7 @@ members = ["crates/*"]
[workspace.package]
edition = "2021"
rust-version = "1.67"
rust-version = "1.69"
homepage = "https://beta.ruff.rs/docs/"
documentation = "https://beta.ruff.rs/docs/"
repository = "https://github.com/charliermarsh/ruff"
@@ -11,7 +11,7 @@ authors = ["Charlie Marsh <charlie.r.marsh@gmail.com>"]
[workspace.dependencies]
anyhow = { version = "1.0.69" }
bitflags = { version = "1.3.2" }
bitflags = { version = "2.1.0" }
chrono = { version = "0.4.23", default-features = false, features = ["clock"] }
clap = { version = "4.1.8", features = ["derive"] }
colored = { version = "2.0.0" }
@@ -30,12 +30,10 @@ path-absolutize = { version = "3.0.14" }
proc-macro2 = { version = "1.0.51" }
quote = { version = "1.0.23" }
regex = { version = "1.7.1" }
ruff_text_size = { git = "https://github.com/charliermarsh/RustPython.git", rev = "c3147d2c1524ebd0e90cf1c2938d770314fd5a5a" }
rustc-hash = { version = "1.1.0" }
rustpython-common = { git = "https://github.com/RustPython/RustPython.git", rev = "c15f670f2c30cfae6b41a1874893590148c74bc4" }
rustpython-parser = { features = [
"lalrpop",
"serde",
], git = "https://github.com/RustPython/RustPython.git", rev = "c15f670f2c30cfae6b41a1874893590148c74bc4" }
rustpython-common = { git = "https://github.com/charliermarsh/RustPython.git", rev = "c3147d2c1524ebd0e90cf1c2938d770314fd5a5a" }
rustpython-parser = { git = "https://github.com/charliermarsh/RustPython.git", rev = "c3147d2c1524ebd0e90cf1c2938d770314fd5a5a" }
schemars = { version = "0.8.12" }
serde = { version = "1.0.152", features = ["derive"] }
serde_json = { version = "1.0.93", features = ["preserve_order"] }
@@ -44,7 +42,7 @@ similar = { version = "2.2.1" }
smallvec = { version = "1.10.0" }
strum = { version = "0.24.1", features = ["strum_macros"] }
strum_macros = { version = "0.24.3" }
syn = { version = "1.0.109" }
syn = { version = "2.0.15" }
test-case = { version = "3.0.0" }
textwrap = { version = "0.16.0" }
toml = { version = "0.7.2" }

README.md: 113 changes

@@ -47,16 +47,16 @@ all while executing tens or hundreds of times faster than any individual tool.
Ruff is extremely actively developed and used in major open-source projects like:
- [pandas](https://github.com/pandas-dev/pandas)
- [FastAPI](https://github.com/tiangolo/fastapi)
- [Transformers (Hugging Face)](https://github.com/huggingface/transformers)
- [Apache Airflow](https://github.com/apache/airflow)
- [FastAPI](https://github.com/tiangolo/fastapi)
- [Hugging Face](https://github.com/huggingface/transformers)
- [Pandas](https://github.com/pandas-dev/pandas)
- [SciPy](https://github.com/scipy/scipy)
...and many more.
Read the [launch blog post](https://notes.crmarsh.com/python-tooling-could-be-much-much-faster) or
the most recent [project update](https://notes.crmarsh.com/ruff-the-first-200-releases).
Ruff is backed by [Astral](https://astral.sh). Read the [launch post](https://astral.sh/blog/announcing-astral-the-company-behind-ruff),
or the original [project announcement](https://notes.crmarsh.com/python-tooling-could-be-much-much-faster).
## Testimonials
@@ -137,7 +137,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com) hook:
```yaml
- repo: https://github.com/charliermarsh/ruff-pre-commit
  # Ruff version.
  rev: 'v0.0.261'
  rev: 'v0.0.264'
  hooks:
    - id: ruff
```
@@ -332,55 +332,68 @@ Ruff is released under the MIT license.
## Who's Using Ruff?
Ruff is used in a number of major open-source projects, including:
Ruff is used by a number of major open-source projects and companies, including:
- [pandas](https://github.com/pandas-dev/pandas)
- [FastAPI](https://github.com/tiangolo/fastapi)
- [Transformers (Hugging Face)](https://github.com/huggingface/transformers)
- [Diffusers (Hugging Face)](https://github.com/huggingface/diffusers)
- Amazon ([AWS SAM](https://github.com/aws/serverless-application-model))
- [Apache Airflow](https://github.com/apache/airflow)
- [SciPy](https://github.com/scipy/scipy)
- [Zulip](https://github.com/zulip/zulip)
- [Bokeh](https://github.com/bokeh/bokeh)
- [Pydantic](https://github.com/pydantic/pydantic)
- [PostHog](https://github.com/PostHog/posthog)
- [Dagster](https://github.com/dagster-io/dagster)
- [Dagger](https://github.com/dagger/dagger)
- [Sphinx](https://github.com/sphinx-doc/sphinx)
- [Hatch](https://github.com/pypa/hatch)
- [PDM](https://github.com/pdm-project/pdm)
- [Jupyter](https://github.com/jupyter-server/jupyter_server)
- [Great Expectations](https://github.com/great-expectations/great_expectations)
- [ONNX](https://github.com/onnx/onnx)
- [Polars](https://github.com/pola-rs/polars)
- [Ibis](https://github.com/ibis-project/ibis)
- [Synapse (Matrix)](https://github.com/matrix-org/synapse)
- [SnowCLI (Snowflake)](https://github.com/Snowflake-Labs/snowcli)
- [Dispatch (Netflix)](https://github.com/Netflix/dispatch)
- [Saleor](https://github.com/saleor/saleor)
- [Pynecone](https://github.com/pynecone-io/pynecone)
- [OpenBB](https://github.com/OpenBB-finance/OpenBBTerminal)
- [Home Assistant](https://github.com/home-assistant/core)
- [Pylint](https://github.com/PyCQA/pylint)
- [Cryptography (PyCA)](https://github.com/pyca/cryptography)
- [cibuildwheel (PyPA)](https://github.com/pypa/cibuildwheel)
- [build (PyPA)](https://github.com/pypa/build)
- AstraZeneca ([Magnus](https://github.com/AstraZeneca/magnus-core))
- Benchling ([Refac](https://github.com/benchling/refac))
- [Babel](https://github.com/python-babel/babel)
- [featuretools](https://github.com/alteryx/featuretools)
- [meson-python](https://github.com/mesonbuild/meson-python)
- [ZenML](https://github.com/zenml-io/zenml)
- [delta-rs](https://github.com/delta-io/delta-rs)
- [Starlite](https://github.com/starlite-api/starlite)
- [telemetry-airflow (Mozilla)](https://github.com/mozilla/telemetry-airflow)
- [Stable Baselines3](https://github.com/DLR-RM/stable-baselines3)
- [PaddlePaddle](https://github.com/PaddlePaddle/Paddle)
- [nox](https://github.com/wntrblm/nox)
- [Neon](https://github.com/neondatabase/neon)
- [The Algorithms](https://github.com/TheAlgorithms/Python)
- [Openverse](https://github.com/WordPress/openverse)
- [MegaLinter](https://github.com/oxsecurity/megalinter)
- [Bokeh](https://github.com/bokeh/bokeh)
- [Cryptography (PyCA)](https://github.com/pyca/cryptography)
- [Dagger](https://github.com/dagger/dagger)
- [Dagster](https://github.com/dagster-io/dagster)
- [DVC](https://github.com/iterative/dvc)
- [FastAPI](https://github.com/tiangolo/fastapi)
- [Gradio](https://github.com/gradio-app/gradio)
- [Great Expectations](https://github.com/great-expectations/great_expectations)
- Hugging Face ([Transformers](https://github.com/huggingface/transformers), [Datasets](https://github.com/huggingface/datasets), [Diffusers](https://github.com/huggingface/diffusers))
- [Hatch](https://github.com/pypa/hatch)
- [Home Assistant](https://github.com/home-assistant/core)
- [Ibis](https://github.com/ibis-project/ibis)
- [Jupyter](https://github.com/jupyter-server/jupyter_server)
- [LangChain](https://github.com/hwchase17/langchain)
- [LlamaIndex](https://github.com/jerryjliu/llama_index)
- Matrix ([Synapse](https://github.com/matrix-org/synapse))
- Meltano ([Meltano CLI](https://github.com/meltano/meltano), [Singer SDK](https://github.com/meltano/sdk))
- Modern Treasury ([Python SDK](https://github.com/Modern-Treasury/modern-treasury-python-sdk))
- Mozilla ([Firefox](https://github.com/mozilla/gecko-dev))
- [MegaLinter](https://github.com/oxsecurity/megalinter)
- Microsoft ([Semantic Kernel](https://github.com/microsoft/semantic-kernel), [ONNX Runtime](https://github.com/microsoft/onnxruntime))
- Netflix ([Dispatch](https://github.com/Netflix/dispatch))
- [Neon](https://github.com/neondatabase/neon)
- [ONNX](https://github.com/onnx/onnx)
- [OpenBB](https://github.com/OpenBB-finance/OpenBBTerminal)
- [PDM](https://github.com/pdm-project/pdm)
- [PaddlePaddle](https://github.com/PaddlePaddle/Paddle)
- [Pandas](https://github.com/pandas-dev/pandas)
- [Poetry](https://github.com/python-poetry/poetry)
- [Polars](https://github.com/pola-rs/polars)
- [PostHog](https://github.com/PostHog/posthog)
- Prefect ([Python SDK](https://github.com/PrefectHQ/prefect), [Marvin](https://github.com/PrefectHQ/marvin))
- [Pydantic](https://github.com/pydantic/pydantic)
- [PyInstaller](https://github.com/pyinstaller/pyinstaller)
- [Pylint](https://github.com/PyCQA/pylint)
- [Pynecone](https://github.com/pynecone-io/pynecone)
- [Robyn](https://github.com/sansyrox/robyn)
- Scale AI ([Launch SDK](https://github.com/scaleapi/launch-python-client))
- Snowflake ([SnowCLI](https://github.com/Snowflake-Labs/snowcli))
- [Saleor](https://github.com/saleor/saleor)
- [SciPy](https://github.com/scipy/scipy)
- [Sphinx](https://github.com/sphinx-doc/sphinx)
- [Stable Baselines3](https://github.com/DLR-RM/stable-baselines3)
- [Starlite](https://github.com/starlite-api/starlite)
- [The Algorithms](https://github.com/TheAlgorithms/Python)
- [Vega-Altair](https://github.com/altair-viz/altair)
- WordPress ([Openverse](https://github.com/WordPress/openverse))
- [ZenML](https://github.com/zenml-io/zenml)
- [Zulip](https://github.com/zulip/zulip)
- [build (PyPA)](https://github.com/pypa/build)
- [cibuildwheel (PyPA)](https://github.com/pypa/cibuildwheel)
- [delta-rs](https://github.com/delta-io/delta-rs)
- [featuretools](https://github.com/alteryx/featuretools)
- [meson-python](https://github.com/mesonbuild/meson-python)
- [nox](https://github.com/wntrblm/nox)
## License


@@ -5,3 +5,6 @@ extend-exclude = ["snapshots", "black"]
trivias = "trivias"
hel = "hel"
whos = "whos"
spawnve = "spawnve"
ned = "ned"
poit = "poit"


@@ -1,6 +1,6 @@
[package]
name = "flake8-to-ruff"
version = "0.0.261"
version = "0.0.264"
edition = { workspace = true }
rust-version = { workspace = true }


@@ -1,6 +1,6 @@
[package]
name = "ruff"
version = "0.0.261"
version = "0.0.264"
authors.workspace = true
edition.workspace = true
rust-version.workspace = true
@@ -17,17 +17,19 @@ name = "ruff"
ruff_cache = { path = "../ruff_cache" }
ruff_diagnostics = { path = "../ruff_diagnostics", features = ["serde"] }
ruff_macros = { path = "../ruff_macros" }
ruff_python_ast = { path = "../ruff_python_ast" }
ruff_python_ast = { path = "../ruff_python_ast", features = ["serde"] }
ruff_python_semantic = { path = "../ruff_python_semantic" }
ruff_python_stdlib = { path = "../ruff_python_stdlib" }
ruff_rustpython = { path = "../ruff_rustpython" }
ruff_text_size = { workspace = true }
annotate-snippets = { version = "0.9.1", features = ["color"] }
anyhow = { workspace = true }
bitflags = { workspace = true }
chrono = { workspace = true }
clap = { workspace = true, features = ["derive", "string"], optional = true }
colored = { workspace = true }
dirs = { version = "4.0.0" }
dirs = { version = "5.0.0" }
fern = { version = "0.6.1" }
glob = { workspace = true }
globset = { workspace = true }
@@ -48,6 +50,7 @@ path-absolutize = { workspace = true, features = [
] }
pathdiff = { version = "0.2.1" }
pep440_rs = { version = "0.3.1", features = ["serde"] }
quick-junit = { version = "0.3.2" }
regex = { workspace = true }
result-like = { version = "0.4.6" }
rustc-hash = { workspace = true }
@@ -57,6 +60,7 @@ schemars = { workspace = true }
semver = { version = "1.0.16" }
serde = { workspace = true }
serde_json = { workspace = true }
similar = { workspace = true, features = ["inline"] }
shellexpand = { workspace = true }
smallvec = { workspace = true }
strum = { workspace = true }
@@ -68,9 +72,11 @@ typed-arena = { version = "2.0.2" }
unicode-width = { version = "0.1.10" }
[dev-dependencies]
insta = { workspace = true, features = ["yaml", "redactions"] }
insta = { workspace = true }
pretty_assertions = "1.3.0"
test-case = { workspace = true }
# Disable colored output in tests
colored = { workspace = true, features = ["no-color"] }
[features]
default = []


@@ -9,6 +9,7 @@ def foo(x, y, z):
    print(x, y, z)

    # This is a real comment.
    # # This is a (nested) comment.
    #return True
    return False


@@ -1,11 +1,13 @@
# Error
assert True
assert True # S101
def fn():
    x = 1

    assert x == 1 # S101
    assert x == 2 # S101

    # Error
    assert x == 1

    # Error
    assert x == 2

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    assert True # OK


@@ -0,0 +1,20 @@
from subprocess import Popen, call, check_call, check_output, run
# Check different Popen wrappers are checked.
Popen("true", shell=True)
call("true", shell=True)
check_call("true", shell=True)
check_output("true", shell=True)
run("true", shell=True)
# Check that truthy values are treated as true.
Popen("true", shell=1)
Popen("true", shell=[1])
Popen("true", shell={1: 1})
Popen("true", shell=(1,))
# Check command argument looks unsafe.
var_string = "true"
Popen(var_string, shell=True)
Popen([var_string], shell=True)
Popen([var_string, ""], shell=True)


@@ -0,0 +1,20 @@
from subprocess import Popen, call, check_call, check_output, run
# Different Popen wrappers are checked.
Popen("true", shell=False)
call("true", shell=False)
check_call("true", shell=False)
check_output("true", shell=False)
run("true", shell=False)
# Check that falsey values are treated as false.
Popen("true", shell=0)
Popen("true", shell=[])
Popen("true", shell={})
Popen("true", shell=None)
# Unknown values are treated as falsey.
Popen("true", shell=True if True else False)
# No value is also caught.
Popen("true")


@@ -0,0 +1,5 @@
def foo(shell):
    pass

foo(shell=True)


@@ -0,0 +1,25 @@
import os
import commands
import popen2
# Check all shell functions.
os.system("true")
os.popen("true")
os.popen2("true")
os.popen3("true")
os.popen4("true")
popen2.popen2("true")
popen2.popen3("true")
popen2.popen4("true")
popen2.Popen3("true")
popen2.Popen4("true")
commands.getoutput("true")
commands.getstatusoutput("true")
# Check command argument looks unsafe.
var_string = "true"
os.system(var_string)
os.system([var_string])
os.system([var_string, ""])


@@ -0,0 +1,20 @@
import os
# Check all shell functions.
os.execl("true")
os.execle("true")
os.execlp("true")
os.execlpe("true")
os.execv("true")
os.execve("true")
os.execvp("true")
os.execvpe("true")
os.spawnl("true")
os.spawnle("true")
os.spawnlp("true")
os.spawnlpe("true")
os.spawnv("true")
os.spawnve("true")
os.spawnvp("true")
os.spawnvpe("true")
os.startfile("true")


@@ -0,0 +1,44 @@
import os
# Check all functions.
subprocess.Popen("true")
subprocess.call("true")
subprocess.check_call("true")
subprocess.check_output("true")
subprocess.run("true")
os.system("true")
os.popen("true")
os.popen2("true")
os.popen3("true")
os.popen4("true")
popen2.popen2("true")
popen2.popen3("true")
popen2.popen4("true")
popen2.Popen3("true")
popen2.Popen4("true")
commands.getoutput("true")
commands.getstatusoutput("true")
os.execl("true")
os.execle("true")
os.execlp("true")
os.execlpe("true")
os.execv("true")
os.execve("true")
os.execvp("true")
os.execvpe("true")
os.spawnl("true")
os.spawnle("true")
os.spawnlp("true")
os.spawnlpe("true")
os.spawnv("true")
os.spawnve("true")
os.spawnvp("true")
os.spawnvpe("true")
os.startfile("true")
# Check it does not fail for full paths.
os.system("/bin/ls")
os.system("./bin/ls")
os.system(["/bin/ls"])
os.system(["/bin/ls", "/tmp"])
os.system(r"C:\\bin\ls")


@@ -1,9 +1,10 @@
"""
Should emit:
B017 - on lines 20
B017 - on lines 23 and 41
"""
import asyncio
import unittest
import pytest
CONSTANT = True
@@ -34,3 +35,14 @@ class Foobar(unittest.TestCase):
    def raises_with_absolute_reference(self):
        with self.assertRaises(asyncio.CancelledError):
            Foo()

def test_pytest_raises():
    with pytest.raises(Exception):
        raise ValueError("Hello")

    with pytest.raises(Exception, "hello"):
        raise ValueError("This is fine")

    with pytest.raises(Exception, match="hello"):
        raise ValueError("This is also fine")


@@ -172,3 +172,14 @@ def iter_f(names):
    if False:
        return [lambda: i for i in range(3)] # error

    for val in range(3):
        def make_func(val=val):
            def tmp():
                return print(val)
            return tmp

        funcs.append(make_func())


@@ -78,6 +78,21 @@ for _section, section_items in itertools.groupby(items, key=lambda p: p[1]):
    for shopper in shoppers:
        collect_shop_items(shopper, section_items) # B031

for _section, section_items in itertools.groupby(items, key=lambda p: p[1]):
    _ = [collect_shop_items(shopper, section_items) for shopper in shoppers] # B031

for _section, section_items in itertools.groupby(items, key=lambda p: p[1]):
    # The variable is overridden, skip checking.
    _ = [_ for section_items in range(3)]
    _ = [collect_shop_items(shopper, section_items) for shopper in shoppers]

for _section, section_items in itertools.groupby(items, key=lambda p: p[1]):
    _ = [item for item in section_items]

for _section, section_items in itertools.groupby(items, key=lambda p: p[1]):
    # The iterator is being used for the second time.
    _ = [(item1, item2) for item1 in section_items for item2 in section_items] # B031

for _section, section_items in itertools.groupby(items, key=lambda p: p[1]):
    if _section == "greens":
        collect_shop_items(shopper, section_items)
@@ -134,6 +149,16 @@ for group in groupby(items, key=lambda p: p[1]):
collect_shop_items("Joe", group[1])
# https://github.com/charliermarsh/ruff/issues/4050
for _section, section_items in itertools.groupby(items, key=lambda p: p[1]):
if _section == "greens":
for item in section_items:
collect_shop_items(shopper, item)
elif _section == "frozen items":
_ = [item for item in section_items]
else:
collect_shop_items(shopper, section_items)
# Make sure we ignore - but don't fail on more complicated invocations
for _key, (_value1, _value2) in groupby(
[("a", (1, 2)), ("b", (3, 4)), ("a", (5, 6))], key=lambda p: p[1]


@@ -7,11 +7,14 @@ set(set(x))
set(list(x))
set(tuple(x))
set(sorted(x))
set(sorted(x, key=lambda y: y))
set(reversed(x))
sorted(list(x))
sorted(tuple(x))
sorted(sorted(x))
sorted(sorted(x, key=lambda y: y))
sorted(reversed(x))
sorted(list(x), key=lambda y: y)
tuple(
list(
[x, 3, "hell"\


@@ -0,0 +1,10 @@
dict({})
dict({'a': 1})
dict({'x': 1 for x in range(10)})
dict(
{'x': 1 for x in range(10)}
)
dict({}, a=1)
dict({x: 1 for x in range(1)}, a=1)


@@ -1,4 +1,3 @@
# PIE802
any([x.id for x in bar])
all([x.id for x in bar])
any( # first comment
@@ -15,5 +14,6 @@ all(x.id for x in bar)
any(x.id for x in bar)
all((x.id for x in bar))
async def f() -> bool:
    return all([await use_greeting(greeting) for greeting in await greetings()])


@@ -21,3 +21,36 @@ def f_c():
def f_ok():
    msg = "hello"
    raise RuntimeError(msg)

def f_unfixable():
    msg = "hello"
    raise RuntimeError("This is an example exception")

def f_msg_in_nested_scope():
    def nested():
        msg = "hello"

    raise RuntimeError("This is an example exception")

def f_msg_in_parent_scope():
    msg = "hello"

    def nested():
        raise RuntimeError("This is an example exception")

def f_fix_indentation_check(foo):
    if foo:
        raise RuntimeError("This is an example exception")
    else:
        if foo == "foo":
            raise RuntimeError(f"This is an exception: {foo}")
    raise RuntimeError("This is an exception: {}".format(foo))

# Report these, but don't fix them
if foo: raise RuntimeError("This is an example exception")
if foo: x = 1; raise RuntimeError("This is an example exception")


@@ -0,0 +1,16 @@
import typing as t # banned
import typing as ty # banned
import numpy as nmp # banned
import numpy as npy # banned
import tensorflow.keras.backend as K # banned
import torch.nn.functional as F # banned
from tensorflow.keras import backend as K # banned
from torch.nn import functional as F # banned
from typing import Any # ok
import numpy as np # ok
import tensorflow as tf # ok
import torch.nn as nn # ok
from tensorflow.keras import backend # ok


@@ -0,0 +1,10 @@
from logging.config import BaseConfigurator # banned
from typing import Any, Dict # banned
from typing import * # banned
from pandas import DataFrame # banned
from pandas import * # banned
import logging.config # ok
import typing # ok
import pandas # ok


@@ -11,3 +11,7 @@ _T = TypeVar("_T") # OK
_TTuple = TypeVarTuple("_TTuple") # OK
_P = ParamSpec("_P") # OK
def f():
    T = TypeVar("T") # OK


@@ -11,3 +11,6 @@ _T = TypeVar("_T") # OK
_TTuple = TypeVarTuple("_TTuple") # OK
_P = ParamSpec("_P") # OK
def f():
    T = TypeVar("T") # OK


@@ -46,3 +46,48 @@ field229: dict[int, int] = {1: 2, **{3: 4}} # Y015 Only simple default values a
field23 = "foo" + "bar" # Y015 Only simple default values are allowed for assignments
field24 = b"foo" + b"bar" # Y015 Only simple default values are allowed for assignments
field25 = 5 * 5 # Y015 Only simple default values are allowed for assignments
# We shouldn't emit Y015 within functions
def f():
    field26: list[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]

# We shouldn't emit Y015 for __slots__ or __match_args__
class Class1:
    __slots__ = (
        '_one',
        '_two',
        '_three',
        '_four',
        '_five',
        '_six',
        '_seven',
        '_eight',
        '_nine',
        '_ten',
        '_eleven',
    )
    __match_args__ = (
        'one',
        'two',
        'three',
        'four',
        'five',
        'six',
        'seven',
        'eight',
        'nine',
        'ten',
        'eleven',
    )

# We shouldn't emit Y015 for __all__
__all__ = ["Class1"]

# Ignore the following for PYI015
field26 = typing.Sequence[int]
field27 = list[str]
field28 = builtins.str
field29 = str
field30 = str | bytes | None


@@ -53,3 +53,48 @@ field229: dict[int, int] = {1: 2, **{3: 4}} # Y015 Only simple default values a
field23 = "foo" + "bar" # Y015 Only simple default values are allowed for assignments
field24 = b"foo" + b"bar" # Y015 Only simple default values are allowed for assignments
field25 = 5 * 5 # Y015 Only simple default values are allowed for assignments
# We shouldn't emit Y015 within functions
def f():
    field26: list[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]

# We shouldn't emit Y015 for __slots__ or __match_args__
class Class1:
    __slots__ = (
        '_one',
        '_two',
        '_three',
        '_four',
        '_five',
        '_six',
        '_seven',
        '_eight',
        '_nine',
        '_ten',
        '_eleven',
    )
    __match_args__ = (
        'one',
        'two',
        'three',
        'four',
        'five',
        'six',
        'seven',
        'eight',
        'nine',
        'ten',
        'eleven',
    )

# We shouldn't emit Y015 for __all__
__all__ = ["Class1"]

# Ignore the following for PYI015
field26 = typing.Sequence[int]
field27 = list[str]
field28 = builtins.str
field29 = str
field30 = str | bytes | None


@@ -0,0 +1,35 @@
# Shouldn't affect non-union field types.
field1: str
# Should emit for duplicate field types.
field2: str | str # PYI016: Duplicate union member `str`
# Should emit for union types in arguments.
def func1(arg1: int | int): # PYI016: Duplicate union member `int`
    print(arg1)

# Should emit for unions in return types.
def func2() -> str | str: # PYI016: Duplicate union member `str`
    return "my string"
# Should emit in longer unions, even if not directly adjacent.
field3: str | str | int # PYI016: Duplicate union member `str`
field4: int | int | str # PYI016: Duplicate union member `int`
field5: str | int | str # PYI016: Duplicate union member `str`
field6: int | bool | str | int # PYI016: Duplicate union member `int`
# Shouldn't emit for non-type unions.
field7 = str | str
# Should emit for strangely-bracketed unions.
field8: int | (str | int) # PYI016: Duplicate union member `int`
# Should handle user brackets when fixing.
field9: int | (int | str) # PYI016: Duplicate union member `int`
field10: (str | int) | str # PYI016: Duplicate union member `str`
# Should emit for nested unions.
field11: dict[int | int, str]


@@ -0,0 +1,32 @@
# Shouldn't affect non-union field types.
field1: str
# Should emit for duplicate field types.
field2: str | str # PYI016: Duplicate union member `str`
# Should emit for union types in arguments.
def func1(arg1: int | int): # PYI016: Duplicate union member `int`
    print(arg1)

# Should emit for unions in return types.
def func2() -> str | str: # PYI016: Duplicate union member `str`
    return "my string"
# Should emit in longer unions, even if not directly adjacent.
field3: str | str | int # PYI016: Duplicate union member `str`
field4: int | int | str # PYI016: Duplicate union member `int`
field5: str | int | str # PYI016: Duplicate union member `str`
field6: int | bool | str | int # PYI016: Duplicate union member `int`
# Shouldn't emit for non-type unions.
field7 = str | str
# Should emit for strangely-bracketed unions.
field8: int | (str | int) # PYI016: Duplicate union member `int`
# Should handle user brackets when fixing.
field9: int | (int | str) # PYI016: Duplicate union member `int`
field10: (str | int) | str # PYI016: Duplicate union member `str`
# Should emit for nested unions.
field11: dict[int | int, str]


@@ -49,3 +49,18 @@ def test_list_expressions(param1, param2):
@pytest.mark.parametrize([some_expr, "param2"], [1, 2, 3])
def test_list_mixed_expr_literal(param1, param2):
    ...
@pytest.mark.parametrize(("param1, " "param2, " "param3"), [(1, 2, 3), (4, 5, 6)])
def test_implicit_str_concat_with_parens(param1, param2, param3):
    ...
@pytest.mark.parametrize("param1, " "param2, " "param3", [(1, 2, 3), (4, 5, 6)])
def test_implicit_str_concat_no_parens(param1, param2, param3):
    ...
@pytest.mark.parametrize((("param1, " "param2, " "param3")), [(1, 2, 3), (4, 5, 6)])
def test_implicit_str_concat_with_multi_parens(param1, param2, param3):
    ...


@@ -3,7 +3,7 @@
###
def x():
    a = 1
    return a # error
    return a # RET504
# Can be refactored false positives
@@ -211,10 +211,10 @@ def nonlocal_assignment():
def decorator() -> Flask:
    app = Flask(__name__)

    @app.route('/hello')
    @app.route("/hello")
    def hello() -> str:
        """Hello endpoint."""
        return 'Hello, World!'
        return "Hello, World!"

    return app
@@ -222,12 +222,13 @@ def decorator() -> Flask:
def default():
    y = 1

    def f(x = y) -> X:
    def f(x=y) -> X:
        return x

    return y

# Multiple assignment
def get_queryset(option_1, option_2):
    queryset: Any = None
    queryset = queryset.filter(a=1)
@@ -246,4 +247,28 @@ def get_queryset():
def get_queryset():
    queryset = Model.filter(a=1)
    return queryset # error
    return queryset # RET504

# Function arguments
def str_to_bool(val):
    if isinstance(val, bool):
        return val
    val = val.strip().lower()
    if val in ("1", "true", "yes"):
        return True
    return False

def str_to_bool(val):
    if isinstance(val, bool):
        return val
    val = 1
    return val # RET504

def str_to_bool(val):
    if isinstance(val, bool):
        return some_obj
    return val


@@ -59,3 +59,15 @@ def bar():
        return foo()
    except ValueError:
        pass

def with_ellipsis():
    try:
        foo()
    except ValueError:
        ...

def with_ellipsis_and_return():
    try:
        return foo()
    except ValueError:
        ...


@@ -42,3 +42,113 @@ if False and f() and a and g() and b: # OK
if a and False and f() and b and g(): # OK
    pass
a or "" or True # SIM222
a or "foo" or True or "bar" # SIM222
a or 0 or True # SIM222
a or 1 or True or 2 # SIM222
a or 0.0 or True # SIM222
a or 0.1 or True or 0.2 # SIM222
a or [] or True # SIM222
a or list([]) or True # SIM222
a or [1] or True or [2] # SIM222
a or list([1]) or True or list([2]) # SIM222
a or {} or True # SIM222
a or dict() or True # SIM222
a or {1: 1} or True or {2: 2} # SIM222
a or dict({1: 1}) or True or dict({2: 2}) # SIM222
a or set() or True # SIM222
a or set(set()) or True # SIM222
a or {1} or True or {2} # SIM222
a or set({1}) or True or set({2}) # SIM222
a or () or True # SIM222
a or tuple(()) or True # SIM222
a or (1,) or True or (2,) # SIM222
a or tuple((1,)) or True or tuple((2,)) # SIM222
a or frozenset() or True # SIM222
a or frozenset(frozenset()) or True # SIM222
a or frozenset({1}) or True or frozenset({2}) # SIM222
a or frozenset(frozenset({1})) or True or frozenset(frozenset({2})) # SIM222
# Inside test `a` is simplified.
bool(a or [1] or True or [2]) # SIM222
assert a or [1] or True or [2] # SIM222

if (a or [1] or True or [2]) and (a or [1] or True or [2]): # SIM222
    pass

0 if a or [1] or True or [2] else 1 # SIM222

while a or [1] or True or [2]: # SIM222
    pass

[
    0
    for a in range(10)
    for b in range(10)
    if a or [1] or True or [2] # SIM222
    if b or [1] or True or [2] # SIM222
]

{
    0
    for a in range(10)
    for b in range(10)
    if a or [1] or True or [2] # SIM222
    if b or [1] or True or [2] # SIM222
}

{
    0: 0
    for a in range(10)
    for b in range(10)
    if a or [1] or True or [2] # SIM222
    if b or [1] or True or [2] # SIM222
}

(
    0
    for a in range(10)
    for b in range(10)
    if a or [1] or True or [2] # SIM222
    if b or [1] or True or [2] # SIM222
)

# Outside test `a` is not simplified.
a or [1] or True or [2] # SIM222

if (a or [1] or True or [2]) == (a or [1]): # SIM222
    pass

if f(a or [1] or True or [2]): # SIM222
    pass


@@ -37,3 +37,113 @@ if True or f() or a or g() or b: # OK
if a or True or f() or b or g(): # OK
    pass
a and "" and False # SIM223
a and "foo" and False and "bar" # SIM223
a and 0 and False # SIM223
a and 1 and False and 2 # SIM223
a and 0.0 and False # SIM223
a and 0.1 and False and 0.2 # SIM223
a and [] and False # SIM223
a and list([]) and False # SIM223
a and [1] and False and [2] # SIM223
a and list([1]) and False and list([2]) # SIM223
a and {} and False # SIM223
a and dict() and False # SIM223
a and {1: 1} and False and {2: 2} # SIM223
a and dict({1: 1}) and False and dict({2: 2}) # SIM223
a and set() and False # SIM223
a and set(set()) and False # SIM223
a and {1} and False and {2} # SIM223
a and set({1}) and False and set({2}) # SIM223
a and () and False # SIM222
a and tuple(()) and False # SIM222
a and (1,) and False and (2,) # SIM222
a and tuple((1,)) and False and tuple((2,)) # SIM222
a and frozenset() and False # SIM222
a and frozenset(frozenset()) and False # SIM222
a and frozenset({1}) and False and frozenset({2}) # SIM222
a and frozenset(frozenset({1})) and False and frozenset(frozenset({2})) # SIM222
# Inside test `a` is simplified.
bool(a and [] and False and []) # SIM223
assert a and [] and False and [] # SIM223

if (a and [] and False and []) or (a and [] and False and []): # SIM223
    pass

0 if a and [] and False and [] else 1 # SIM222

while a and [] and False and []: # SIM223
    pass

[
    0
    for a in range(10)
    for b in range(10)
    if a and [] and False and [] # SIM223
    if b and [] and False and [] # SIM223
]

{
    0
    for a in range(10)
    for b in range(10)
    if a and [] and False and [] # SIM223
    if b and [] and False and [] # SIM223
}

{
    0: 0
    for a in range(10)
    for b in range(10)
    if a and [] and False and [] # SIM223
    if b and [] and False and [] # SIM223
}

(
    0
    for a in range(10)
    for b in range(10)
    if a and [] and False and [] # SIM223
    if b and [] and False and [] # SIM223
)

# Outside test `a` is not simplified.
a and [] and False and [] # SIM223

if (a and [] and False and []) == (a and []): # SIM223
    pass

if f(a and [] and False and []): # SIM223
    pass


@@ -31,3 +31,6 @@ typing.TypedDict.anything()
# import aliases are resolved
import typing as totally_not_typing
totally_not_typing.TypedDict
# relative imports are respected
from .typing import TypedDict


@@ -1,11 +0,0 @@
# module members cannot be imported with that syntax
import typing.TypedDict
# we don't track reassignments
import typing, other
typing = other
typing.TypedDict()
# yet another false positive
def foo(typing):
    typing.TypedDict()


@@ -1,3 +1,6 @@
from __future__ import annotations
def f():
    # Even in strict mode, this shouldn't raise an error, since `pkg` is used at runtime,
    # and implicitly imports `pkg.bar`.


@@ -22,3 +22,6 @@ from bar import (
    a, # comment 7
    b, # comment 8
)
# comment 9
from baz import * # comment 10


@@ -0,0 +1,4 @@
from mypackage.subpackage import ( # long comment that seems to be a problem
    a_long_variable_name_that_causes_problems,
    items,
)


@@ -0,0 +1,3 @@
"""Hello, world!"""
x = 1


@@ -0,0 +1,7 @@
from __future__ import annotations
import os
import sys
import pytz
import django.settings
from library import foo
from . import local


@@ -39,3 +39,11 @@ class Test(unittest.TestCase):
    def testTest(self):
        assert True

from typing import override

@override
def BAD_FUNC():
    pass


@@ -13,3 +13,11 @@ class C:
    myObj2 = namedtuple("MyObj2", ["a", "b"])
    Employee = NamedTuple('Employee', [('name', str), ('id', int)])
    Point2D = TypedDict('Point2D', {'in': int, 'x-y': int})

class D(TypedDict):
    lower: int
    CONSTANT: str
    mixedCase: bool
    _mixedCase: list
    mixed_Case: set


@@ -4,11 +4,11 @@ if not X is Y:
#: E714
if not X.B is Y:
    pass
#: E714
#: Okay
if not X is Y is not Z:
    pass
#: Okay
if not X is not Y:
    pass


@@ -13,6 +13,7 @@ f = lambda: (yield from g())
class F:
    f = lambda x: 2 * x
f = object()
f.method = lambda: "Method"
f = {}
@@ -21,3 +22,30 @@ f = []
f.append(lambda x: x**2)
f = g = lambda x: x**2
lambda: "no-op"
# Annotated
from typing import Callable, ParamSpec
P = ParamSpec("P")
# ParamSpec cannot be used in this context, so do not preserve the annotation.
f: Callable[P, int] = lambda *args: len(args)
f: Callable[[], None] = lambda: None
f: Callable[..., None] = lambda a, b: None
f: Callable[[int], int] = lambda x: 2 * x
# Let's use the `Callable` type from `collections.abc` instead.
from collections.abc import Callable
f: Callable[[str, int], str] = lambda a, b: a * b
f: Callable[[str, int], tuple[str, int]] = lambda a, b: (a, b)
f: Callable[[str, int, list[str]], list[str]] = lambda a, b, /, c: [*c, a * b]
# Override `Callable`
class Callable:
    pass
# Do not copy the annotation from here on out.
f: Callable[[str, int], str] = lambda a, b: a * b


@@ -97,10 +97,10 @@ if length > options.max_line_length:
if os.path.exists(os.path.join(path, PEP8_BIN)):
cmd = ([os.path.join(path, PEP8_BIN)] +
self._pep8_options(targetfile))
#: W191
#: W191 - okay
'''
multiline string with tab in it'''
#: E101 W191
#: E101 (W191 okay)
'''multiline string
with tabs
and spaces
@@ -142,4 +142,10 @@ def test_keys(self):
x = [
'abc'
]
#:
#: W191 - okay
''' multiline string with tab in it, same lines'''
""" here we're using '''different delimiters'''"""
'''
multiline string with tab in it, different lines
'''
" single line string with tab in it"


@@ -9,6 +9,9 @@ def f():
    # Here's a standalone comment that's over the limit.

    x = 2
    # Another standalone that is preceded by a newline and indent token and is over the limit.

    print("Here's a string that's over the limit, but it's not a docstring.")


@@ -115,6 +115,20 @@ def f(x, *args, **kwargs):
    return x

def f(x, *, y, z):
    """Do something.

    Args:
        x: some first value

    Keyword Args:
        y (int): the other value
        z (int): the last value
    """
    return x, y, z

class Test:
    def f(self, /, arg1: int) -> None:
        """


@@ -11,3 +11,9 @@
"{}".format(1, 2, 3) # F523
"{:{}}".format(1, 2) # No issues
"{:{}}".format(1, 2, 3) # F523
# With *args
"{0}{1}".format(*args) # No issues
"{0}{1}".format(1, *args) # No issues
"{0}{1}".format(1, 2, *args) # No issues
"{0}{1}".format(1, 2, 3, *args) # F523


@@ -0,0 +1,13 @@
def redef(value):
    match value:
        case True:
            def fun(x, y):
                return x

        case False:
            def fun(x, y):
                return y

    return fun


@@ -132,3 +132,8 @@ def in_ipython_notebook() -> bool:
    except NameError:
        return False # not in notebook
    return True

def named_expr():
    if any((key := (value := x)) for x in ["ok"]):
        print(key)


@@ -121,3 +121,8 @@ def f(x: int):
print("A")
case y:
pass
def f():
if any((key := (value := x)) for x in ["ok"]):
print(key)


@@ -1,11 +1,16 @@
x = 1 # type: ignore
x = 1 # type ignore
x = 1 # type:ignore
x = 1 # type: ignore[attr-defined] # type: ignore
x = 1
x = 1 # type ignore
x = 1 # type ignore # noqa
x = 1 # type: ignore[attr-defined]
x = 1 # type: ignore[attr-defined, name-defined]
x = 1 # type: ignore[attr-defined] # type: ignore[type-mismatch]
x = 1 # type: ignore[type-mismatch] # noqa
x = 1 # type: ignore [attr-defined]
x = 1 # type: ignore [attr-defined, name-defined]
x = 1 # type: ignore [type-mismatch] # noqa
x = 1 # type: Union[int, str]
x = 1 # type: ignoreme


@@ -0,0 +1,16 @@
x = 1 # pyright: ignore
x = 1 # pyright:ignore
x = 1 # pyright: ignore[attr-defined] # pyright: ignore
x = 1
x = 1 # pyright ignore
x = 1 # pyright ignore # noqa
x = 1 # pyright: ignore[attr-defined]
x = 1 # pyright: ignore[attr-defined, name-defined]
x = 1 # pyright: ignore[attr-defined] # pyright: ignore[type-mismatch]
x = 1 # pyright: ignore[type-mismatch] # noqa
x = 1 # pyright: ignore [attr-defined]
x = 1 # pyright: ignore [attr-defined, name-defined]
x = 1 # pyright: ignore [type-mismatch] # noqa
x = 1 # pyright: Union[int, str]
x = 1 # pyright: ignoreme


@@ -1,3 +1,6 @@
import typing
from typing import cast
# For -> for, variable reused
for i in []:
    for i in []: # error
@@ -43,6 +46,9 @@ for i in []:
# For -> assignment
for i in []:
    # ignore typing cast
    i = cast(int, i)
    i = typing.cast(int, i)

    i = 5 # error
# For -> augmented assignment
@@ -53,6 +59,10 @@ for i in []:
for i in []:
    i: int = 5 # error

# For -> annotated assignment without value
for i in []:
    i: int # no error

# Async for -> for, variable reused
async for i in []:
    for i in []: # error


@@ -0,0 +1,51 @@
class TestClass:
    def __bool__(self):
        ...

    def __bool__(self, x): # too many mandatory args
        ...

    def __bool__(self, x=1): # additional optional args OK
        ...

    def __bool__(self, *args): # varargs OK
        ...

    def __bool__(): # ignored; should be caught by E0211/N805
        ...

    @staticmethod
    def __bool__():
        ...

    @staticmethod
    def __bool__(x): # too many mandatory args
        ...

    @staticmethod
    def __bool__(x=1): # additional optional args OK
        ...

    def __eq__(self, other): # multiple args
        ...

    def __eq__(self, other=1): # expected arg is optional
        ...

    def __eq__(self): # too few mandatory args
        ...

    def __eq__(self, other, other_other): # too many mandatory args
        ...

    def __round__(self): # allow zero additional args.
        ...

    def __round__(self, x): # allow one additional arg.
        ...

    def __round__(self, x, y): # disallow 2 args
        ...

    def __round__(self, x, y, z=2): # disallow 3 args even when one is optional
        ...


@@ -83,3 +83,26 @@ print('Hello %s (%s)' % bar['bop'])
print('Hello %(arg)s' % bar)
print('Hello %(arg)s' % bar.baz)
print('Hello %(arg)s' % bar['bop'])
# Hanging modulos
(
"foo %s "
"bar %s"
) % (x, y)
(
"foo %(foo)s "
"bar %(bar)s"
) % {"foo": x, "bar": y}
(
"""foo %s"""
% (x,)
)
(
"""
foo %s
"""
% (x,)
)


@@ -34,28 +34,6 @@ pytest.param('"%8s" % (None,)', id="unsafe width-string conversion"),
"%(and)s" % {"and": 2}
# OK (arguably false negatives)
(
"foo %s "
"bar %s"
) % (x, y)
(
"foo %(foo)s "
"bar %(bar)s"
) % {"foo": x, "bar": y}
(
"""foo %s"""
% (x,)
)
(
"""
foo %s
"""
% (x,)
)
'Hello %s' % bar
'Hello %s' % bar.baz


@@ -46,6 +46,14 @@ print("foo {} ".format(x))
'({}={{0!e}})'.format(a)
"{[b]}".format(a)
'{[b]}'.format(a)
"""{[b]}""".format(a)
'''{[b]}'''.format(a)
###
# Non-errors
###


@@ -0,0 +1,27 @@
import typing
from dataclasses import dataclass, field
from typing import ClassVar, Sequence
KNOWINGLY_MUTABLE_DEFAULT = []
@dataclass()
class A:
mutable_default: list[int] = []
immutable_annotation: typing.Sequence[int] = []
without_annotation = []
ignored_via_comment: list[int] = [] # noqa: RUF008
correct_code: list[int] = KNOWINGLY_MUTABLE_DEFAULT
perfectly_fine: list[int] = field(default_factory=list)
class_variable: typing.ClassVar[list[int]] = []
@dataclass
class B:
mutable_default: list[int] = []
immutable_annotation: Sequence[int] = []
without_annotation = []
ignored_via_comment: list[int] = [] # noqa: RUF008
correct_code: list[int] = KNOWINGLY_MUTABLE_DEFAULT
perfectly_fine: list[int] = field(default_factory=list)
class_variable: ClassVar[list[int]] = []


@@ -0,0 +1,42 @@
import datetime
import re
import typing
from dataclasses import dataclass, field
from pathlib import Path
from typing import ClassVar, NamedTuple
def default_function() -> list[int]:
return []
class ImmutableType(NamedTuple):
something: int = 8
@dataclass()
class A:
hidden_mutable_default: list[int] = default_function()
class_variable: typing.ClassVar[list[int]] = default_function()
another_class_var: ClassVar[list[int]] = default_function()
fine_path: Path = Path()
fine_date: datetime.date = datetime.date(2042, 1, 1)
fine_timedelta: datetime.timedelta = datetime.timedelta(hours=7)
fine_tuple: tuple[int] = tuple([1])
fine_regex: re.Pattern = re.compile(r".*")
DEFAULT_IMMUTABLETYPE_FOR_ALL_DATACLASSES = ImmutableType(40)
DEFAULT_A_FOR_ALL_DATACLASSES = A([1, 2, 3])
@dataclass
class B:
hidden_mutable_default: list[int] = default_function()
another_dataclass: A = A()
not_optimal: ImmutableType = ImmutableType(20)
good_variant: ImmutableType = DEFAULT_IMMUTABLETYPE_FOR_ALL_DATACLASSES
okay_variant: A = DEFAULT_A_FOR_ALL_DATACLASSES
fine_dataclass_function: list[int] = field(default_factory=list)


@@ -4,12 +4,12 @@ use itertools::Itertools;
use libcst_native::{
Codegen, CodegenState, ImportNames, ParenthesizableWhitespace, SmallStatement, Statement,
};
use rustpython_parser::ast::{ExcepthandlerKind, Expr, Keyword, Location, Stmt, StmtKind};
use ruff_text_size::{TextLen, TextRange, TextSize};
use rustpython_parser::ast::{ExcepthandlerKind, Expr, Keyword, Stmt, StmtKind};
use rustpython_parser::{lexer, Mode, Tok};
use ruff_diagnostics::Edit;
use ruff_python_ast::helpers;
use ruff_python_ast::helpers::to_absolute;
use ruff_python_ast::imports::{AnyImport, Import};
use ruff_python_ast::newlines::NewlineWithTrailingNewline;
use ruff_python_ast::source_code::{Indexer, Locator, Stylist};
@@ -102,20 +102,17 @@ fn is_lone_child(child: &Stmt, parent: &Stmt, deleted: &[&Stmt]) -> Result<bool>
/// Return the location of a trailing semicolon following a `Stmt`, if it's part
/// of a multi-statement line.
fn trailing_semicolon(stmt: &Stmt, locator: &Locator) -> Option<Location> {
let contents = locator.skip(stmt.end_location.unwrap());
for (row, line) in NewlineWithTrailingNewline::from(contents).enumerate() {
let trimmed = line.trim();
fn trailing_semicolon(stmt: &Stmt, locator: &Locator) -> Option<TextSize> {
let contents = locator.after(stmt.end());
for line in NewlineWithTrailingNewline::from(contents) {
let trimmed = line.trim_start();
if trimmed.starts_with(';') {
let column = line
.char_indices()
.find_map(|(column, char)| if char == ';' { Some(column) } else { None })
.unwrap();
return Some(to_absolute(
Location::new(row + 1, column),
stmt.end_location.unwrap(),
));
let semicolon_offset = line.text_len() - trimmed.text_len();
return Some(stmt.end() + line.start() + semicolon_offset);
}
if !trimmed.starts_with('\\') {
break;
}
@@ -124,42 +121,36 @@ fn trailing_semicolon(stmt: &Stmt, locator: &Locator) -> Option<Location> {
}
/// Find the next valid break for a `Stmt` after a semicolon.
fn next_stmt_break(semicolon: Location, locator: &Locator) -> Location {
let start_location = Location::new(semicolon.row(), semicolon.column() + 1);
let contents = locator.skip(start_location);
for (row, line) in NewlineWithTrailingNewline::from(contents).enumerate() {
fn next_stmt_break(semicolon: TextSize, locator: &Locator) -> TextSize {
let start_location = semicolon + TextSize::from(1);
let contents = &locator.contents()[usize::from(start_location)..];
for line in NewlineWithTrailingNewline::from(contents) {
let trimmed = line.trim();
// Skip past any continuations.
if trimmed.starts_with('\\') {
continue;
}
return if trimmed.is_empty() {
// If the line is empty, then despite the previous statement ending in a
// semicolon, we know that it's not a multi-statement line.
to_absolute(Location::new(row + 1, 0), start_location)
} else {
// Otherwise, find the start of the next statement. (Or, anything that isn't
// whitespace.)
let column = line
.char_indices()
.find_map(|(column, char)| {
if char.is_whitespace() {
None
} else {
Some(column)
}
})
.unwrap();
to_absolute(Location::new(row + 1, column), start_location)
};
return start_location
+ if trimmed.is_empty() {
// If the line is empty, then despite the previous statement ending in a
// semicolon, we know that it's not a multi-statement line.
line.start()
} else {
// Otherwise, find the start of the next statement. (Or, anything that isn't
// whitespace.)
let relative_offset = line.find(|c: char| !c.is_whitespace()).unwrap();
line.start() + TextSize::try_from(relative_offset).unwrap()
};
}
Location::new(start_location.row() + 1, 0)
locator.line_end(start_location)
}
/// Return `true` if a `Stmt` occurs at the end of a file.
fn is_end_of_file(stmt: &Stmt, locator: &Locator) -> bool {
let contents = locator.skip(stmt.end_location.unwrap());
contents.is_empty()
stmt.end() == locator.contents().text_len()
}
/// Return the `Fix` to use when deleting a `Stmt`.
@@ -190,33 +181,23 @@ pub fn delete_stmt(
{
// If removing this node would lead to an invalid syntax tree, replace
// it with a `pass`.
Ok(Edit::replacement(
"pass".to_string(),
stmt.location,
stmt.end_location.unwrap(),
))
Ok(Edit::range_replacement("pass".to_string(), stmt.range()))
} else {
Ok(if let Some(semicolon) = trailing_semicolon(stmt, locator) {
let next = next_stmt_break(semicolon, locator);
Edit::deletion(stmt.location, next)
} else if helpers::match_leading_content(stmt, locator) {
Edit::deletion(stmt.location, stmt.end_location.unwrap())
} else if helpers::preceded_by_continuation(stmt, indexer) {
if is_end_of_file(stmt, locator) && stmt.location.column() == 0 {
Edit::deletion(stmt.start(), next)
} else if helpers::has_leading_content(stmt, locator) {
Edit::range_deletion(stmt.range())
} else if helpers::preceded_by_continuation(stmt, indexer, locator) {
if is_end_of_file(stmt, locator) && locator.is_at_start_of_line(stmt.start()) {
// Special-case: a file can't end in a continuation.
Edit::replacement(
stylist.line_ending().to_string(),
stmt.location,
stmt.end_location.unwrap(),
)
Edit::range_replacement(stylist.line_ending().to_string(), stmt.range())
} else {
Edit::deletion(stmt.location, stmt.end_location.unwrap())
Edit::range_deletion(stmt.range())
}
} else {
Edit::deletion(
Location::new(stmt.location.row(), 0),
Location::new(stmt.end_location.unwrap().row() + 1, 0),
)
let range = locator.full_lines_range(stmt.range());
Edit::range_deletion(range)
})
}
}
@@ -231,7 +212,7 @@ pub fn remove_unused_imports<'a>(
indexer: &Indexer,
stylist: &Stylist,
) -> Result<Edit> {
let module_text = locator.slice(stmt);
let module_text = locator.slice(stmt.range());
let mut tree = match_module(module_text)?;
let Some(Statement::Simple(body)) = tree.body.first_mut() else {
@@ -337,11 +318,7 @@ pub fn remove_unused_imports<'a>(
};
tree.codegen(&mut state);
Ok(Edit::replacement(
state.to_string(),
stmt.location,
stmt.end_location.unwrap(),
))
Ok(Edit::range_replacement(state.to_string(), stmt.range()))
}
}
@@ -353,15 +330,14 @@ pub fn remove_unused_imports<'a>(
/// For this behavior, set `remove_parentheses` to `true`.
pub fn remove_argument(
locator: &Locator,
call_at: Location,
expr_at: Location,
expr_end: Location,
call_at: TextSize,
expr_range: TextRange,
args: &[Expr],
keywords: &[Keyword],
remove_parentheses: bool,
) -> Result<Edit> {
// TODO(sbrugman): Preserve trailing comments.
let contents = locator.skip(call_at);
let contents = locator.after(call_at);
let mut fix_start = None;
let mut fix_end = None;
@@ -374,13 +350,13 @@ pub fn remove_argument(
if n_arguments == 1 {
// Case 1: there is only one argument.
let mut count: usize = 0;
for (start, tok, end) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
for (tok, range) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
if matches!(tok, Tok::Lpar) {
if count == 0 {
fix_start = Some(if remove_parentheses {
start
range.start()
} else {
Location::new(start.row(), start.column() + 1)
range.start() + TextSize::from(1)
});
}
count += 1;
@@ -390,9 +366,9 @@ pub fn remove_argument(
count -= 1;
if count == 0 {
fix_end = Some(if remove_parentheses {
end
range.end()
} else {
Location::new(end.row(), end.column() - 1)
range.end() - TextSize::from(1)
});
break;
}
@@ -400,27 +376,27 @@ pub fn remove_argument(
}
} else if args
.iter()
.map(|node| node.location)
.chain(keywords.iter().map(|node| node.location))
.any(|location| location > expr_at)
.map(Expr::start)
.chain(keywords.iter().map(Keyword::start))
.any(|location| location > expr_range.start())
{
// Case 2: argument or keyword is _not_ the last node.
let mut seen_comma = false;
for (start, tok, end) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
for (tok, range) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
if seen_comma {
if matches!(tok, Tok::NonLogicalNewline) {
// Also delete any non-logical newlines after the comma.
continue;
}
fix_end = Some(if matches!(tok, Tok::Newline) {
end
range.end()
} else {
start
range.start()
});
break;
}
if start == expr_at {
fix_start = Some(start);
if range.start() == expr_range.start() {
fix_start = Some(range.start());
}
if fix_start.is_some() && matches!(tok, Tok::Comma) {
seen_comma = true;
@@ -429,13 +405,13 @@ pub fn remove_argument(
} else {
// Case 3: argument or keyword is the last node, so we have to find the last
// comma in the stmt.
for (start, tok, _) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
if start == expr_at {
fix_end = Some(expr_end);
for (tok, range) in lexer::lex_located(contents, Mode::Module, call_at).flatten() {
if range.start() == expr_range.start() {
fix_end = Some(expr_range.end());
break;
}
if matches!(tok, Tok::Comma) {
fix_start = Some(start);
fix_start = Some(range.start());
}
}
}
@@ -482,11 +458,8 @@ pub fn get_or_import_symbol(
//
// By adding this no-op edit, we force the `unused-imports` fix to conflict with the
// `sys-exit-alias` fix, and thus will avoid applying both fixes in the same pass.
let import_edit = Edit::replacement(
locator.slice(source).to_string(),
source.location,
source.end_location.unwrap(),
);
let import_edit =
Edit::range_replacement(locator.slice(source.range()).to_string(), source.range());
Ok((import_edit, binding))
} else {
if let Some(stmt) = importer.get_import_from(module) {
@@ -527,8 +500,8 @@ pub fn get_or_import_symbol(
#[cfg(test)]
mod tests {
use anyhow::Result;
use ruff_text_size::TextSize;
use rustpython_parser as parser;
use rustpython_parser::ast::Location;
use ruff_python_ast::source_code::Locator;
@@ -546,19 +519,13 @@ mod tests {
let program = parser::parse_program(contents, "<filename>")?;
let stmt = program.first().unwrap();
let locator = Locator::new(contents);
assert_eq!(
trailing_semicolon(stmt, &locator),
Some(Location::new(1, 5))
);
assert_eq!(trailing_semicolon(stmt, &locator), Some(TextSize::from(5)));
let contents = "x = 1 ; y = 1";
let program = parser::parse_program(contents, "<filename>")?;
let stmt = program.first().unwrap();
let locator = Locator::new(contents);
assert_eq!(
trailing_semicolon(stmt, &locator),
Some(Location::new(1, 6))
);
assert_eq!(trailing_semicolon(stmt, &locator), Some(TextSize::from(6)));
let contents = r#"
x = 1 \
@@ -568,10 +535,7 @@ x = 1 \
let program = parser::parse_program(contents, "<filename>")?;
let stmt = program.first().unwrap();
let locator = Locator::new(contents);
assert_eq!(
trailing_semicolon(stmt, &locator),
Some(Location::new(2, 2))
);
assert_eq!(trailing_semicolon(stmt, &locator), Some(TextSize::from(10)));
Ok(())
}
@@ -581,15 +545,15 @@ x = 1 \
let contents = "x = 1; y = 1";
let locator = Locator::new(contents);
assert_eq!(
next_stmt_break(Location::new(1, 4), &locator),
Location::new(1, 5)
next_stmt_break(TextSize::from(4), &locator),
TextSize::from(5)
);
let contents = "x = 1 ; y = 1";
let locator = Locator::new(contents);
assert_eq!(
next_stmt_break(Location::new(1, 5), &locator),
Location::new(1, 6)
next_stmt_break(TextSize::from(5), &locator),
TextSize::from(6)
);
let contents = r#"
@@ -599,8 +563,8 @@ x = 1 \
.trim();
let locator = Locator::new(contents);
assert_eq!(
next_stmt_break(Location::new(2, 2), &locator),
Location::new(2, 4)
next_stmt_break(TextSize::from(10), &locator),
TextSize::from(12)
);
}
}

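The three helpers above (`trailing_semicolon`, `next_stmt_break`, `is_end_of_file`) show this release's wider migration from row/column `Location` values to flat byte offsets (`TextSize`/`TextRange`). A minimal std-only sketch of the same idea, with plain `usize` standing in for `TextSize` and the multi-line continuation handling omitted:

// Given a statement's end offset, scan the trailing text for a semicolon
// and return its absolute byte offset, if any. Simplified to one line of
// lookahead; the real helper also follows `\` continuations.
fn trailing_semicolon(source: &str, stmt_end: usize) -> Option<usize> {
    let rest = &source[stmt_end..];
    let trimmed = rest.trim_start();
    if trimmed.starts_with(';') {
        // Absolute offset = statement end + leading whitespace width.
        Some(stmt_end + (rest.len() - trimmed.len()))
    } else {
        None
    }
}

fn main() {
    // Matches the updated test above: `x = 1 ; y = 1` -> offset 6.
    assert_eq!(trailing_semicolon("x = 1 ; y = 1", 5), Some(6));
}

With byte offsets the arithmetic collapses to a single addition, where the old code rebuilt an absolute `Location` from a relative row and column.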

@@ -1,12 +1,11 @@
use std::collections::BTreeSet;
use itertools::Itertools;
use ruff_text_size::{TextRange, TextSize};
use rustc_hash::FxHashMap;
use rustpython_parser::ast::Location;
use ruff_diagnostics::{Diagnostic, Edit, Fix};
use ruff_python_ast::source_code::Locator;
use ruff_python_ast::types::Range;
use crate::linter::FixTable;
use crate::registry::{AsRule, Rule};
@@ -15,10 +14,15 @@ pub mod actions;
/// Auto-fix errors in a file, and write the fixed source code to disk.
pub fn fix_file(diagnostics: &[Diagnostic], locator: &Locator) -> Option<(String, FixTable)> {
if diagnostics.iter().all(|check| check.fix.is_empty()) {
let mut with_fixes = diagnostics
.iter()
.filter(|diag| !diag.fix.is_empty())
.peekable();
if with_fixes.peek().is_none() {
None
} else {
Some(apply_fixes(diagnostics.iter(), locator))
Some(apply_fixes(with_fixes, locator))
}
}
@@ -28,7 +32,7 @@ fn apply_fixes<'a>(
locator: &'a Locator<'a>,
) -> (String, FixTable) {
let mut output = String::with_capacity(locator.len());
let mut last_pos: Option<Location> = None;
let mut last_pos: Option<TextSize> = None;
let mut applied: BTreeSet<&Edit> = BTreeSet::default();
let mut fixed = FxHashMap::default();
@@ -52,7 +56,7 @@ fn apply_fixes<'a>(
// Best-effort approach: if this fix overlaps with a fix we've already applied,
// skip it.
if last_pos.map_or(false, |last_pos| {
fix.location()
fix.min_start()
.map_or(false, |fix_location| last_pos >= fix_location)
}) {
continue;
@@ -60,14 +64,14 @@ fn apply_fixes<'a>(
for edit in fix.edits() {
// Add all contents from `last_pos` to the start of the edit.
let slice = locator.slice(Range::new(last_pos.unwrap_or_default(), edit.location));
let slice = locator.slice(TextRange::new(last_pos.unwrap_or_default(), edit.start()));
output.push_str(slice);
// Add the patch itself.
output.push_str(&edit.content);
output.push_str(edit.content().unwrap_or_default());
// Track that the edit was applied.
last_pos = Some(edit.end_location);
last_pos = Some(edit.end());
applied.insert(edit);
}
@@ -75,7 +79,7 @@ fn apply_fixes<'a>(
}
// Add the remaining content.
let slice = locator.skip(last_pos.unwrap_or_default());
let slice = locator.after(last_pos.unwrap_or_default());
output.push_str(slice);
(output, fixed)
@@ -83,8 +87,8 @@ fn apply_fixes<'a>(
/// Compare two fixes.
fn cmp_fix(rule1: Rule, rule2: Rule, fix1: &Fix, fix2: &Fix) -> std::cmp::Ordering {
fix1.location()
.cmp(&fix2.location())
fix1.min_start()
.cmp(&fix2.min_start())
.then_with(|| match (&rule1, &rule2) {
// Apply `EndsInPeriod` fixes before `NewLineAfterLastParagraph` fixes.
(Rule::EndsInPeriod, Rule::NewLineAfterLastParagraph) => std::cmp::Ordering::Less,
@@ -95,7 +99,7 @@ fn cmp_fix(rule1: Rule, rule2: Rule, fix1: &Fix, fix2: &Fix) -> std::cmp::Orderi
#[cfg(test)]
mod tests {
use rustpython_parser::ast::Location;
use ruff_text_size::TextSize;
use ruff_diagnostics::Diagnostic;
use ruff_diagnostics::Edit;
@@ -109,8 +113,7 @@ mod tests {
.map(|edit| Diagnostic {
// The choice of rule here is arbitrary.
kind: MissingNewlineAtEndOfFile.into(),
location: edit.location,
end_location: edit.end_location,
range: edit.range(),
fix: edit.into(),
parent: None,
})
@@ -135,11 +138,11 @@ class A(object):
"#
.trim(),
);
let diagnostics = create_diagnostics([Edit {
content: "Bar".to_string(),
location: Location::new(1, 8),
end_location: Location::new(1, 14),
}]);
let diagnostics = create_diagnostics([Edit::replacement(
"Bar".to_string(),
TextSize::new(8),
TextSize::new(14),
)]);
let (contents, fixed) = apply_fixes(diagnostics.iter(), &locator);
assert_eq!(
contents,
@@ -161,11 +164,7 @@ class A(object):
"#
.trim(),
);
let diagnostics = create_diagnostics([Edit {
content: String::new(),
location: Location::new(1, 7),
end_location: Location::new(1, 15),
}]);
let diagnostics = create_diagnostics([Edit::deletion(TextSize::new(7), TextSize::new(15))]);
let (contents, fixed) = apply_fixes(diagnostics.iter(), &locator);
assert_eq!(
contents,
@@ -188,16 +187,8 @@ class A(object, object, object):
.trim(),
);
let diagnostics = create_diagnostics([
Edit {
content: String::new(),
location: Location::new(1, 8),
end_location: Location::new(1, 16),
},
Edit {
content: String::new(),
location: Location::new(1, 22),
end_location: Location::new(1, 30),
},
Edit::deletion(TextSize::from(8), TextSize::from(16)),
Edit::deletion(TextSize::from(22), TextSize::from(30)),
]);
let (contents, fixed) = apply_fixes(diagnostics.iter(), &locator);
@@ -222,16 +213,8 @@ class A(object):
.trim(),
);
let diagnostics = create_diagnostics([
Edit {
content: String::new(),
location: Location::new(1, 7),
end_location: Location::new(1, 15),
},
Edit {
content: "ignored".to_string(),
location: Location::new(1, 9),
end_location: Location::new(1, 11),
},
Edit::deletion(TextSize::from(7), TextSize::from(15)),
Edit::replacement("ignored".to_string(), TextSize::from(9), TextSize::from(11)),
]);
let (contents, fixed) = apply_fixes(diagnostics.iter(), &locator);
assert_eq!(

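The rewritten `apply_fixes` above splices edits into the output buffer by byte offset: copy the untouched source between `last_pos` and the edit's start, emit the replacement, and skip anything that overlaps an already-applied edit. A self-contained sketch with a pared-down `Edit` type (the real one also distinguishes insertions, deletions, and replacements):

struct Edit {
    start: usize,
    end: usize,
    content: String,
}

// Apply edits sorted by start offset, best-effort skipping overlaps.
fn apply_edits(source: &str, edits: &[Edit]) -> String {
    let mut output = String::with_capacity(source.len());
    let mut last_pos = 0;
    for edit in edits {
        if edit.start < last_pos {
            continue; // Overlaps a previously applied edit; skip it.
        }
        output.push_str(&source[last_pos..edit.start]); // Untouched slice.
        output.push_str(&edit.content); // The patch itself.
        last_pos = edit.end;
    }
    output.push_str(&source[last_pos..]); // Remaining content.
    output
}

fn main() {
    // Mirrors the deletion test above: drop `(object)` from a class header.
    let deletion = Edit { start: 7, end: 15, content: String::new() };
    assert_eq!(apply_edits("class A(object):", &[deletion]), "class A:");
}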

@@ -1,14 +1,14 @@
use ruff_python_semantic::scope::ScopeStack;
use ruff_text_size::TextRange;
use rustpython_parser::ast::{Expr, Stmt};
use ruff_python_ast::types::Range;
use ruff_python_ast::types::RefEquality;
use ruff_python_semantic::analyze::visibility::{Visibility, VisibleScope};
use ruff_python_semantic::scope::ScopeId;
use crate::checkers::ast::AnnotationContext;
use crate::docstrings::definition::Definition;
type Context<'a> = (ScopeStack, Vec<RefEquality<'a, Stmt>>);
type Context<'a> = (ScopeId, Vec<RefEquality<'a, Stmt>>);
/// A collection of AST nodes that are deferred for later analysis.
/// Used to, e.g., store functions whose bodies shouldn't be analyzed until all
@@ -16,7 +16,7 @@ type Context<'a> = (ScopeStack, Vec<RefEquality<'a, Stmt>>);
#[derive(Default)]
pub struct Deferred<'a> {
pub definitions: Vec<(Definition<'a>, Visibility, Context<'a>)>,
pub string_type_definitions: Vec<(Range, &'a str, AnnotationContext, Context<'a>)>,
pub string_type_definitions: Vec<(TextRange, &'a str, AnnotationContext, Context<'a>)>,
pub type_definitions: Vec<(&'a Expr, AnnotationContext, Context<'a>)>,
pub functions: Vec<(&'a Stmt, Context<'a>, VisibleScope)>,
pub lambdas: Vec<(&'a Expr, Context<'a>)>,
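
The `Deferred` change above follows from replacing `ScopeStack` with child-to-parent `ScopeId` pointers: a deferred node only needs to remember one scope index, because the ancestor chain can be rebuilt on demand. An illustrative arena sketch (names here are not the actual `ruff_python_semantic` API):

#[derive(Copy, Clone, Debug, PartialEq)]
struct ScopeId(usize);

struct Scope {
    parent: Option<ScopeId>, // Child-to-parent pointer.
}

struct Scopes(Vec<Scope>);

impl Scopes {
    // Rebuild the scope chain from a single stored id, innermost first.
    fn ancestors(&self, mut id: ScopeId) -> Vec<ScopeId> {
        let mut chain = vec![id];
        while let Some(parent) = self.0[id.0].parent {
            chain.push(parent);
            id = parent;
        }
        chain
    }
}

fn main() {
    let scopes = Scopes(vec![
        Scope { parent: None },             // Module scope.
        Scope { parent: Some(ScopeId(0)) }, // Function scope.
    ]);
    assert_eq!(scopes.ancestors(ScopeId(1)), vec![ScopeId(1), ScopeId(0)]);
}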

File diff suppressed because it is too large.


@@ -9,6 +9,7 @@ use ruff_python_ast::helpers::to_module_path;
use ruff_python_ast::imports::{ImportMap, ModuleImport};
use ruff_python_ast::source_code::{Indexer, Locator, Stylist};
use ruff_python_ast::visitor::Visitor;
use ruff_python_stdlib::path::is_python_stub_file;
use crate::directives::IsortDirectives;
use crate::registry::Rule;
@@ -29,13 +30,11 @@ fn extract_import_map(path: &Path, package: Option<&Path>, blocks: &[&Block]) ->
for stmt in blocks.iter().flat_map(|block| &block.imports) {
match &stmt.node {
StmtKind::Import { names } => {
module_imports.extend(names.iter().map(|name| {
ModuleImport::new(
name.node.name.clone(),
stmt.location,
stmt.end_location.unwrap(),
)
}));
module_imports.extend(
names
.iter()
.map(|name| ModuleImport::new(name.node.name.clone(), stmt.range())),
);
}
StmtKind::ImportFrom {
module,
@@ -60,11 +59,7 @@ fn extract_import_map(path: &Path, package: Option<&Path>, blocks: &[&Block]) ->
Cow::Owned(module_path[..module_path.len() - level].join("."))
};
module_imports.extend(names.iter().map(|name| {
ModuleImport::new(
format!("{}.{}", module, name.node.name),
name.location,
name.end_location.unwrap(),
)
ModuleImport::new(format!("{}.{}", module, name.node.name), name.range())
}));
}
_ => panic!("Expected StmtKind::Import | StmtKind::ImportFrom"),
@@ -88,9 +83,11 @@ pub fn check_imports(
path: &Path,
package: Option<&Path>,
) -> (Vec<Diagnostic>, Option<ImportMap>) {
let is_stub = is_python_stub_file(path);
// Extract all imports from the AST.
let tracker = {
let mut tracker = ImportTracker::new(locator, directives, path);
let mut tracker = ImportTracker::new(locator, directives, is_stub);
tracker.visit_body(python_ast);
tracker
};
@@ -111,7 +108,7 @@ pub fn check_imports(
}
if settings.rules.enabled(Rule::MissingRequiredImport) {
diagnostics.extend(isort::rules::add_required_imports(
&blocks, python_ast, locator, stylist, settings, autofix,
&blocks, python_ast, locator, stylist, settings, autofix, is_stub,
));
}

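`check_imports` now computes `is_stub` once and threads it through to the isort rules. A sketch of what `is_python_stub_file` presumably reduces to (an assumption; the real helper lives in `ruff_python_stdlib::path`):

use std::path::Path;

fn is_stub(path: &Path) -> bool {
    // Stub files carry the `.pyi` extension.
    path.extension().map_or(false, |ext| ext == "pyi")
}

fn main() {
    assert!(is_stub(Path::new("foo.pyi")));
    assert!(!is_stub(Path::new("foo.py")));
}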

@@ -1,9 +1,9 @@
use rustpython_parser::ast::Location;
use ruff_text_size::TextRange;
use rustpython_parser::lexer::LexResult;
use ruff_diagnostics::{Diagnostic, Fix};
use ruff_diagnostics::{Diagnostic, DiagnosticKind, Fix};
use ruff_python_ast::source_code::{Locator, Stylist};
use ruff_python_ast::types::Range;
use ruff_python_ast::token_kind::TokenKind;
use crate::registry::{AsRule, Rule};
use crate::rules::pycodestyle::rules::logical_lines::{
@@ -37,7 +37,7 @@ pub fn check_logical_lines(
settings: &Settings,
autofix: flags::Autofix,
) -> Vec<Diagnostic> {
let mut diagnostics = vec![];
let mut context = LogicalLinesContext::new(settings);
#[cfg(feature = "logical_lines")]
let should_fix_missing_whitespace =
@@ -59,120 +59,51 @@ pub fn check_logical_lines(
for line in &LogicalLines::from_tokens(tokens, locator) {
if line.flags().contains(TokenFlags::OPERATOR) {
for (location, kind) in space_around_operator(&line) {
if settings.rules.enabled(kind.rule()) {
diagnostics.push(Diagnostic {
kind,
location,
end_location: location,
fix: Fix::empty(),
parent: None,
});
}
}
for (location, kind) in whitespace_around_named_parameter_equals(&line.tokens()) {
if settings.rules.enabled(kind.rule()) {
diagnostics.push(Diagnostic {
kind,
location,
end_location: location,
fix: Fix::empty(),
parent: None,
});
}
}
for (location, kind) in missing_whitespace_around_operator(&line.tokens()) {
if settings.rules.enabled(kind.rule()) {
diagnostics.push(Diagnostic {
kind,
location,
end_location: location,
fix: Fix::empty(),
parent: None,
});
}
}
for diagnostic in missing_whitespace(&line, should_fix_missing_whitespace) {
if settings.rules.enabled(diagnostic.kind.rule()) {
diagnostics.push(diagnostic);
}
}
space_around_operator(&line, &mut context);
whitespace_around_named_parameter_equals(&line, &mut context);
missing_whitespace_around_operator(&line, &mut context);
missing_whitespace(&line, should_fix_missing_whitespace, &mut context);
}
if line
.flags()
.contains(TokenFlags::OPERATOR | TokenFlags::PUNCTUATION)
{
for (location, kind) in extraneous_whitespace(&line) {
if settings.rules.enabled(kind.rule()) {
diagnostics.push(Diagnostic {
kind,
location,
end_location: location,
fix: Fix::empty(),
parent: None,
});
}
}
extraneous_whitespace(&line, &mut context);
}
if line.flags().contains(TokenFlags::KEYWORD) {
for (location, kind) in whitespace_around_keywords(&line) {
if settings.rules.enabled(kind.rule()) {
diagnostics.push(Diagnostic {
kind,
location,
end_location: location,
fix: Fix::empty(),
parent: None,
});
}
}
for (location, kind) in missing_whitespace_after_keyword(&line.tokens()) {
if settings.rules.enabled(kind.rule()) {
diagnostics.push(Diagnostic {
kind,
location,
end_location: location,
fix: Fix::empty(),
parent: None,
});
}
}
whitespace_around_keywords(&line, &mut context);
missing_whitespace_after_keyword(&line, &mut context);
}
if line.flags().contains(TokenFlags::COMMENT) {
for (range, kind) in whitespace_before_comment(&line.tokens(), locator) {
if settings.rules.enabled(kind.rule()) {
diagnostics.push(Diagnostic {
kind,
location: range.location,
end_location: range.end_location,
fix: Fix::empty(),
parent: None,
});
}
}
whitespace_before_comment(&line, locator, prev_line.is_none(), &mut context);
}
if line.flags().contains(TokenFlags::BRACKET) {
for diagnostic in whitespace_before_parameters(
&line.tokens(),
whitespace_before_parameters(
&line,
should_fix_whitespace_before_parameters,
) {
if settings.rules.enabled(diagnostic.kind.rule()) {
diagnostics.push(diagnostic);
}
}
&mut context,
);
}
// Extract the indentation level.
let Some(start_loc) = line.first_token_location() else { continue; };
let start_line = locator.slice(Range::new(Location::new(start_loc.row(), 0), start_loc));
let indent_level = expand_indent(start_line);
let Some(first_token) = line.first_token() else {
continue;
};
let range = if first_token.kind() == TokenKind::Indent {
first_token.range()
} else {
TextRange::new(locator.line_start(first_token.start()), first_token.start())
};
let indent_level = expand_indent(locator.slice(range));
let indent_size = 4;
for (location, kind) in indentation(
for kind in indentation(
&line,
prev_line.as_ref(),
indent_char,
@@ -181,13 +112,7 @@ pub fn check_logical_lines(
indent_size,
) {
if settings.rules.enabled(kind.rule()) {
diagnostics.push(Diagnostic {
kind,
location,
end_location: location,
fix: Fix::empty(),
parent: None,
});
context.push(kind, range);
}
}
@@ -196,7 +121,40 @@ pub fn check_logical_lines(
prev_indent_level = Some(indent_level);
}
}
diagnostics
context.diagnostics
}
#[derive(Debug, Clone)]
pub(crate) struct LogicalLinesContext<'a> {
settings: &'a Settings,
diagnostics: Vec<Diagnostic>,
}
impl<'a> LogicalLinesContext<'a> {
fn new(settings: &'a Settings) -> Self {
Self {
settings,
diagnostics: Vec::new(),
}
}
pub fn push<K: Into<DiagnosticKind>>(&mut self, kind: K, range: TextRange) {
let kind = kind.into();
if self.settings.rules.enabled(kind.rule()) {
self.diagnostics.push(Diagnostic {
kind,
range,
fix: Fix::empty(),
parent: None,
});
}
}
pub fn push_diagnostic(&mut self, diagnostic: Diagnostic) {
if self.settings.rules.enabled(diagnostic.kind.rule()) {
self.diagnostics.push(diagnostic);
}
}
}
#[cfg(test)]

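The new `LogicalLinesContext` centralizes the enabled-rule check that the old code repeated at every push site. A std-only sketch of the pattern, with rule kinds reduced to strings:

use std::collections::HashSet;
use std::ops::Range;

struct Context {
    enabled: HashSet<&'static str>,
    diagnostics: Vec<(&'static str, Range<usize>)>,
}

impl Context {
    // The enabled check lives here, once, instead of at every call site.
    fn push(&mut self, kind: &'static str, range: Range<usize>) {
        if self.enabled.contains(kind) {
            self.diagnostics.push((kind, range));
        }
    }
}

fn main() {
    let mut ctx = Context {
        enabled: ["E225"].into_iter().collect(),
        diagnostics: Vec::new(),
    };
    ctx.push("E225", 4..5);
    ctx.push("E231", 9..10); // Disabled rule: silently dropped.
    assert_eq!(ctx.diagnostics.len(), 1);
}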

@@ -1,15 +1,13 @@
//! `NoQA` enforcement and validation.
use nohash_hasher::IntMap;
use rustpython_parser::ast::Location;
use itertools::Itertools;
use ruff_text_size::{TextLen, TextRange, TextSize};
use ruff_diagnostics::{Diagnostic, Edit};
use ruff_python_ast::newlines::StrExt;
use ruff_python_ast::types::Range;
use ruff_python_ast::source_code::Locator;
use crate::codes::NoqaCode;
use crate::noqa;
use crate::noqa::{Directive, FileExemption};
use crate::noqa::{Directive, FileExemption, NoqaDirectives, NoqaMapping};
use crate::registry::{AsRule, Rule};
use crate::rule_redirects::get_redirect_target;
use crate::rules::ruff::rules::{UnusedCodes, UnusedNOQA};
@@ -17,37 +15,25 @@ use crate::settings::{flags, Settings};
pub fn check_noqa(
diagnostics: &mut Vec<Diagnostic>,
contents: &str,
commented_lines: &[usize],
noqa_line_for: &IntMap<usize, usize>,
locator: &Locator,
comment_ranges: &[TextRange],
noqa_line_for: &NoqaMapping,
settings: &Settings,
autofix: flags::Autofix,
) -> Vec<usize> {
let enforce_noqa = settings.rules.enabled(Rule::UnusedNOQA);
let lines: Vec<&str> = contents.universal_newlines().collect();
// Identify any codes that are globally exempted (within the current file).
let exemption = noqa::file_exemption(&lines, commented_lines);
// Map from line number to `noqa` directive on that line, along with any codes
// that were matched by the directive.
let mut noqa_directives: IntMap<usize, (Directive, Vec<NoqaCode>)> = IntMap::default();
let exemption = noqa::file_exemption(locator.contents(), comment_ranges);
// Extract all `noqa` directives.
if enforce_noqa {
for lineno in commented_lines {
noqa_directives
.entry(lineno - 1)
.or_insert_with(|| (noqa::extract_noqa_directive(lines[lineno - 1]), vec![]));
}
}
let mut noqa_directives = NoqaDirectives::from_commented_ranges(comment_ranges, locator);
// Indices of diagnostics that were ignored by a `noqa` directive.
let mut ignored_diagnostics = vec![];
// Remove any ignored diagnostics.
for (index, diagnostic) in diagnostics.iter().enumerate() {
'outer: for (index, diagnostic) in diagnostics.iter().enumerate() {
if matches!(diagnostic.kind.rule(), Rule::BlanketNOQA) {
continue;
}
@@ -68,92 +54,65 @@ pub fn check_noqa(
FileExemption::None => {}
}
let diagnostic_lineno = diagnostic.location.row();
let noqa_offsets = diagnostic
.parent
.into_iter()
.chain(std::iter::once(diagnostic.start()))
.map(|position| noqa_line_for.resolve(position))
.unique();
// Is the violation ignored by a `noqa` directive on the parent line?
if let Some(parent_lineno) = diagnostic.parent.map(|location| location.row()) {
if parent_lineno != diagnostic_lineno {
let noqa_lineno = noqa_line_for.get(&parent_lineno).unwrap_or(&parent_lineno);
if commented_lines.contains(noqa_lineno) {
let noqa = noqa_directives.entry(noqa_lineno - 1).or_insert_with(|| {
(noqa::extract_noqa_directive(lines[noqa_lineno - 1]), vec![])
});
match noqa {
(Directive::All(..), matches) => {
matches.push(diagnostic.kind.rule().noqa_code());
ignored_diagnostics.push(index);
continue;
}
(Directive::Codes(.., codes, _), matches) => {
if noqa::includes(diagnostic.kind.rule(), codes) {
matches.push(diagnostic.kind.rule().noqa_code());
ignored_diagnostics.push(index);
continue;
}
}
(Directive::None, ..) => {}
}
}
}
}
// Is the diagnostic ignored by a `noqa` directive on the same line?
let noqa_lineno = noqa_line_for
.get(&diagnostic_lineno)
.unwrap_or(&diagnostic_lineno);
if commented_lines.contains(noqa_lineno) {
let noqa = noqa_directives
.entry(noqa_lineno - 1)
.or_insert_with(|| (noqa::extract_noqa_directive(lines[noqa_lineno - 1]), vec![]));
match noqa {
(Directive::All(..), matches) => {
matches.push(diagnostic.kind.rule().noqa_code());
ignored_diagnostics.push(index);
continue;
}
(Directive::Codes(.., codes, _), matches) => {
if noqa::includes(diagnostic.kind.rule(), codes) {
matches.push(diagnostic.kind.rule().noqa_code());
for noqa_offset in noqa_offsets {
if let Some(directive_line) = noqa_directives.find_line_with_directive_mut(noqa_offset)
{
let suppressed = match &directive_line.directive {
Directive::All(..) => {
directive_line
.matches
.push(diagnostic.kind.rule().noqa_code());
ignored_diagnostics.push(index);
continue;
true
}
Directive::Codes(.., codes, _) => {
if noqa::includes(diagnostic.kind.rule(), codes) {
directive_line
.matches
.push(diagnostic.kind.rule().noqa_code());
ignored_diagnostics.push(index);
true
} else {
false
}
}
Directive::None => unreachable!(),
};
if suppressed {
continue 'outer;
}
(Directive::None, ..) => {}
}
}
}
// Enforce that the noqa directive was actually used (RUF100).
if enforce_noqa {
for (row, (directive, matches)) in noqa_directives {
match directive {
Directive::All(leading_spaces, start_byte, end_byte, trailing_spaces) => {
if matches.is_empty() {
let start_char = lines[row][..start_byte].chars().count();
let end_char =
start_char + lines[row][start_byte..end_byte].chars().count();
let mut diagnostic = Diagnostic::new(
UnusedNOQA { codes: None },
Range::new(
Location::new(row + 1, start_char),
Location::new(row + 1, end_char),
),
);
for line in noqa_directives.lines() {
match &line.directive {
Directive::All(leading_spaces, noqa_range, trailing_spaces) => {
if line.matches.is_empty() {
let mut diagnostic =
Diagnostic::new(UnusedNOQA { codes: None }, *noqa_range);
if autofix.into() && settings.rules.should_fix(diagnostic.kind.rule()) {
diagnostic.set_fix(delete_noqa(
row,
lines[row],
leading_spaces,
start_byte,
end_byte,
trailing_spaces,
*leading_spaces,
*noqa_range,
*trailing_spaces,
locator,
));
}
diagnostics.push(diagnostic);
}
}
Directive::Codes(leading_spaces, start_byte, end_byte, codes, trailing_spaces) => {
Directive::Codes(leading_spaces, range, codes, trailing_spaces) => {
let mut disabled_codes = vec![];
let mut unknown_codes = vec![];
let mut unmatched_codes = vec![];
@@ -166,7 +125,9 @@ pub fn check_noqa(
break;
}
if matches.iter().any(|m| *m == code) || settings.external.contains(code) {
if line.matches.iter().any(|m| *m == code)
|| settings.external.contains(code)
{
valid_codes.push(code);
} else {
if let Ok(rule) = Rule::from_code(code) {
@@ -189,10 +150,6 @@ pub fn check_noqa(
&& unknown_codes.is_empty()
&& unmatched_codes.is_empty())
{
let start_char = lines[row][..start_byte].chars().count();
let end_char =
start_char + lines[row][start_byte..end_byte].chars().count();
let mut diagnostic = Diagnostic::new(
UnusedNOQA {
codes: Some(UnusedCodes {
@@ -210,26 +167,20 @@ pub fn check_noqa(
.collect(),
}),
},
Range::new(
Location::new(row + 1, start_char),
Location::new(row + 1, end_char),
),
*range,
);
if autofix.into() && settings.rules.should_fix(diagnostic.kind.rule()) {
if valid_codes.is_empty() {
diagnostic.set_fix(delete_noqa(
row,
lines[row],
leading_spaces,
start_byte,
end_byte,
trailing_spaces,
*leading_spaces,
*range,
*trailing_spaces,
locator,
));
} else {
diagnostic.set_fix(Edit::replacement(
diagnostic.set_fix(Edit::range_replacement(
format!("# noqa: {}", valid_codes.join(", ")),
Location::new(row + 1, start_char),
Location::new(row + 1, end_char),
*range,
));
}
}
@@ -247,39 +198,37 @@ pub fn check_noqa(
/// Generate an [`Edit`] to delete a `noqa` directive.
fn delete_noqa(
row: usize,
line: &str,
leading_spaces: usize,
start_byte: usize,
end_byte: usize,
trailing_spaces: usize,
leading_spaces: TextSize,
noqa_range: TextRange,
trailing_spaces: TextSize,
locator: &Locator,
) -> Edit {
if start_byte - leading_spaces == 0 && end_byte == line.len() {
// Ex) `# noqa`
Edit::deletion(Location::new(row + 1, 0), Location::new(row + 2, 0))
} else if end_byte == line.len() {
// Ex) `x = 1 # noqa`
let start_char = line[..start_byte].chars().count();
let end_char = start_char + line[start_byte..end_byte].chars().count();
Edit::deletion(
Location::new(row + 1, start_char - leading_spaces),
Location::new(row + 1, end_char + trailing_spaces),
let line_range = locator.line_range(noqa_range.start());
// Ex) `# noqa`
if line_range
== TextRange::new(
noqa_range.start() - leading_spaces,
noqa_range.end() + trailing_spaces,
)
} else if line[end_byte..].trim_start().starts_with('#') {
// Ex) `x = 1 # noqa # type: ignore`
let start_char = line[..start_byte].chars().count();
let end_char = start_char + line[start_byte..end_byte].chars().count();
{
let full_line_end = locator.full_line_end(line_range.end());
Edit::deletion(line_range.start(), full_line_end)
}
// Ex) `x = 1 # noqa`
else if noqa_range.end() + trailing_spaces == line_range.end() {
Edit::deletion(noqa_range.start() - leading_spaces, line_range.end())
}
// Ex) `x = 1 # noqa # type: ignore`
else if locator.contents()[usize::from(noqa_range.end() + trailing_spaces)..].starts_with('#')
{
Edit::deletion(noqa_range.start(), noqa_range.end() + trailing_spaces)
}
// Ex) `x = 1 # noqa here`
else {
Edit::deletion(
Location::new(row + 1, start_char),
Location::new(row + 1, end_char + trailing_spaces),
)
} else {
// Ex) `x = 1 # noqa here`
let start_char = line[..start_byte].chars().count();
let end_char = start_char + line[start_byte..end_byte].chars().count();
Edit::deletion(
Location::new(row + 1, start_char + 1 + 1),
Location::new(row + 1, end_char + trailing_spaces),
noqa_range.start() + "# ".text_len(),
noqa_range.end() + trailing_spaces,
)
}
}

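`check_noqa` now resolves every diagnostic offset through a `NoqaMapping` before looking up directives: an offset inside a multi-line string or continuation is remapped to the end of that range, where the `# noqa` comment is expected. A simplified sketch with `usize` offsets (the `4..22` range mirrors the triple-quoted-string test later in this diff):

// Remap an offset that falls inside a mapped range to the range's end.
fn resolve(mappings: &[(usize, usize)], offset: usize) -> usize {
    mappings
        .iter()
        .find(|(start, end)| (*start..*end).contains(&offset))
        .map_or(offset, |&(_, end)| end)
}

fn main() {
    // A violation at byte 10, inside a string spanning 4..22, is governed
    // by a `# noqa` on the string's last line (offset 22).
    assert_eq!(resolve(&[(4, 22)], 10), 22);
    // Offsets outside any mapping resolve to themselves.
    assert_eq!(resolve(&[(4, 22)], 30), 30);
}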

@@ -1,10 +1,11 @@
//! Lint rules based on checking physical lines.
use ruff_text_size::TextSize;
use std::path::Path;
use ruff_diagnostics::Diagnostic;
use ruff_python_ast::newlines::StrExt;
use ruff_python_ast::source_code::{Locator, Stylist};
use ruff_python_ast::source_code::{Indexer, Locator, Stylist};
use crate::registry::Rule;
use crate::rules::flake8_executable::helpers::{extract_shebang, ShebangDirective};
@@ -24,8 +25,8 @@ pub fn check_physical_lines(
path: &Path,
locator: &Locator,
stylist: &Stylist,
commented_lines: &[usize],
doc_lines: &[usize],
indexer: &Indexer,
doc_lines: &[TextSize],
settings: &Settings,
autofix: flags::Autofix,
) -> Vec<Diagnostic> {
@@ -55,17 +56,19 @@ pub fn check_physical_lines(
let fix_shebang_whitespace =
autofix.into() && settings.rules.should_fix(Rule::ShebangLeadingWhitespace);
let mut commented_lines_iter = commented_lines.iter().peekable();
let mut commented_lines_iter = indexer.comment_ranges().iter().peekable();
let mut doc_lines_iter = doc_lines.iter().peekable();
let string_lines = indexer.triple_quoted_string_ranges();
for (index, line) in locator.contents().universal_newlines().enumerate() {
while commented_lines_iter
.next_if(|lineno| &(index + 1) == *lineno)
.next_if(|comment_range| line.range().contains_range(**comment_range))
.is_some()
{
if enforce_unnecessary_coding_comment {
if index < 2 {
if let Some(diagnostic) =
unnecessary_coding_comment(index, line, fix_unnecessary_coding_comment)
unnecessary_coding_comment(&line, fix_unnecessary_coding_comment)
{
diagnostics.push(diagnostic);
}
@@ -73,15 +76,11 @@ pub fn check_physical_lines(
}
if enforce_blanket_type_ignore {
if let Some(diagnostic) = blanket_type_ignore(index, line) {
diagnostics.push(diagnostic);
}
blanket_type_ignore(&mut diagnostics, &line);
}
if enforce_blanket_noqa {
if let Some(diagnostic) = blanket_noqa(index, line) {
diagnostics.push(diagnostic);
}
blanket_noqa(&mut diagnostics, &line);
}
if enforce_shebang_missing
@@ -90,31 +89,31 @@ pub fn check_physical_lines(
|| enforce_shebang_newline
|| enforce_shebang_python
{
let shebang = extract_shebang(line);
let shebang = extract_shebang(&line);
if enforce_shebang_not_executable {
if let Some(diagnostic) = shebang_not_executable(path, index, &shebang) {
if let Some(diagnostic) = shebang_not_executable(path, line.range(), &shebang) {
diagnostics.push(diagnostic);
}
}
if enforce_shebang_missing {
if !has_any_shebang && matches!(shebang, ShebangDirective::Match(_, _, _, _)) {
if !has_any_shebang && matches!(shebang, ShebangDirective::Match(..)) {
has_any_shebang = true;
}
}
if enforce_shebang_whitespace {
if let Some(diagnostic) =
shebang_whitespace(index, &shebang, fix_shebang_whitespace)
shebang_whitespace(line.range(), &shebang, fix_shebang_whitespace)
{
diagnostics.push(diagnostic);
}
}
if enforce_shebang_newline {
if let Some(diagnostic) = shebang_newline(index, &shebang) {
if let Some(diagnostic) = shebang_newline(line.range(), &shebang, index == 0) {
diagnostics.push(diagnostic);
}
}
if enforce_shebang_python {
if let Some(diagnostic) = shebang_python(index, &shebang) {
if let Some(diagnostic) = shebang_python(line.range(), &shebang) {
diagnostics.push(diagnostic);
}
}
@@ -122,40 +121,40 @@ pub fn check_physical_lines(
}
while doc_lines_iter
.next_if(|lineno| &(index + 1) == *lineno)
.next_if(|doc_line_start| line.range().contains(**doc_line_start))
.is_some()
{
if enforce_doc_line_too_long {
if let Some(diagnostic) = doc_line_too_long(index, line, settings) {
if let Some(diagnostic) = doc_line_too_long(&line, settings) {
diagnostics.push(diagnostic);
}
}
}
if enforce_mixed_spaces_and_tabs {
if let Some(diagnostic) = mixed_spaces_and_tabs(index, line) {
if let Some(diagnostic) = mixed_spaces_and_tabs(&line) {
diagnostics.push(diagnostic);
}
}
if enforce_line_too_long {
if let Some(diagnostic) = line_too_long(index, line, settings) {
if let Some(diagnostic) = line_too_long(&line, settings) {
diagnostics.push(diagnostic);
}
}
if enforce_bidirectional_unicode {
diagnostics.extend(pylint::rules::bidirectional_unicode(index, line));
diagnostics.extend(pylint::rules::bidirectional_unicode(&line));
}
if enforce_trailing_whitespace || enforce_blank_line_contains_whitespace {
if let Some(diagnostic) = trailing_whitespace(index, line, settings, autofix) {
if let Some(diagnostic) = trailing_whitespace(&line, settings, autofix) {
diagnostics.push(diagnostic);
}
}
if enforce_tab_indentation {
if let Some(diagnostic) = tab_indentation(index, line) {
if let Some(diagnostic) = tab_indentation(&line, string_lines) {
diagnostics.push(diagnostic);
}
}
@@ -186,7 +185,7 @@ mod tests {
use rustpython_parser::Mode;
use std::path::Path;
use ruff_python_ast::source_code::{Locator, Stylist};
use ruff_python_ast::source_code::{Indexer, Locator, Stylist};
use crate::registry::Rule;
use crate::settings::{flags, Settings};
@@ -198,6 +197,7 @@ mod tests {
let line = "'\u{4e9c}' * 2"; // 7 in UTF-32, 9 in UTF-8.
let locator = Locator::new(line);
let tokens: Vec<_> = lex(line, Mode::Module).collect();
let indexer = Indexer::from_tokens(&tokens, &locator);
let stylist = Stylist::from_tokens(&tokens, &locator);
let check_with_max_line_length = |line_length: usize| {
@@ -205,7 +205,7 @@ mod tests {
Path::new("foo.py"),
&locator,
&stylist,
&[],
&indexer,
&[],
&Settings {
line_length,

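The physical-line checker now walks comment ranges in lockstep with the line iterator via `Peekable::next_if`, consuming a comment only while it falls inside the current line. A runnable sketch of the pattern with made-up offsets:

fn main() {
    let comment_starts = [3usize, 10, 25];
    let lines = [(0usize, 8), (8, 20), (20, 30)]; // (start, end) byte ranges.
    let mut comments = comment_starts.iter().peekable();
    for (start, end) in lines {
        // Consume comments until one starts beyond the current line.
        while comments.next_if(|&&c| (start..end).contains(&c)).is_some() {
            println!("line {start}..{end} contains a comment");
        }
    }
}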

@@ -64,7 +64,7 @@ pub fn check_tokens(
// RUF001, RUF002, RUF003
if enforce_ambiguous_unicode_character {
let mut state_machine = StateMachine::default();
for &(start, ref tok, end) in tokens.iter().flatten() {
for &(ref tok, range) in tokens.iter().flatten() {
let is_docstring = if enforce_ambiguous_unicode_character {
state_machine.consume(tok)
} else {
@@ -74,8 +74,7 @@ pub fn check_tokens(
if matches!(tok, Tok::String { .. } | Tok::Comment(_)) {
diagnostics.extend(ruff::rules::ambiguous_unicode_character(
locator,
start,
end,
range,
if matches!(tok, Tok::String { .. }) {
if is_docstring {
Context::Docstring
@@ -94,10 +93,10 @@ pub fn check_tokens(
// ERA001
if enforce_commented_out_code {
for (start, tok, end) in tokens.iter().flatten() {
for (tok, range) in tokens.iter().flatten() {
if matches!(tok, Tok::Comment(_)) {
if let Some(diagnostic) =
eradicate::rules::commented_out_code(locator, *start, *end, settings, autofix)
eradicate::rules::commented_out_code(locator, *range, settings, autofix)
{
diagnostics.push(diagnostic);
}
@@ -107,12 +106,11 @@ pub fn check_tokens(
// W605
if enforce_invalid_escape_sequence {
for (start, tok, end) in tokens.iter().flatten() {
for (tok, range) in tokens.iter().flatten() {
if matches!(tok, Tok::String { .. }) {
diagnostics.extend(pycodestyle::rules::invalid_escape_sequence(
locator,
*start,
*end,
*range,
autofix.into() && settings.rules.should_fix(Rule::InvalidEscapeSequence),
));
}
@@ -120,10 +118,10 @@ pub fn check_tokens(
}
// PLE2510, PLE2512, PLE2513
if enforce_invalid_string_character {
for (start, tok, end) in tokens.iter().flatten() {
for (tok, range) in tokens.iter().flatten() {
if matches!(tok, Tok::String { .. }) {
diagnostics.extend(
pylint::rules::invalid_string_characters(locator, *start, *end, autofix.into())
pylint::rules::invalid_string_characters(locator, *range, autofix.into())
.into_iter()
.filter(|diagnostic| settings.rules.enabled(diagnostic.kind.rule())),
);
@@ -155,6 +153,7 @@ pub fn check_tokens(
flake8_implicit_str_concat::rules::implicit(
tokens,
&settings.flake8_implicit_str_concat,
locator,
)
.into_iter()
.filter(|diagnostic| settings.rules.enabled(diagnostic.kind.rule())),

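After the migration, token-based rules receive `(Tok, TextRange)` pairs instead of `(Location, Tok, Location)` triples. A toy version of the loop shape, with `std::ops::Range` standing in for `TextRange` and a pared-down token enum:

enum Tok {
    Comment(String),
    Str(String),
}

fn main() {
    let tokens = vec![
        (Tok::Str("'a'".to_string()), 0..3),
        (Tok::Comment("# note".to_string()), 4..10),
    ];
    // One range value per token, rather than separate start/end locations.
    for (tok, range) in &tokens {
        if let Tok::Comment(text) = tok {
            println!("comment {text:?} at {range:?}");
        }
    }
}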

@@ -1,8 +1,15 @@
use crate::registry::{Linter, Rule};
use std::fmt::Formatter;
#[derive(PartialEq, Eq, PartialOrd, Ord)]
pub struct NoqaCode(&'static str, &'static str);
impl std::fmt::Debug for NoqaCode {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl std::fmt::Display for NoqaCode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
write!(f, "{}{}", self.0, self.1)
@@ -202,6 +209,7 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<Rule> {
(Pylint, "W0711") => Rule::BinaryOpException,
(Pylint, "W1508") => Rule::InvalidEnvvarDefault,
(Pylint, "W2901") => Rule::RedefinedLoopName,
(Pylint, "E0302") => Rule::UnexpectedSpecialMethodSignature,
// flake8-builtins
(Flake8Builtins, "001") => Rule::BuiltinVariableShadowing,
@@ -263,6 +271,8 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<Rule> {
(Flake8Comprehensions, "15") => Rule::UnnecessarySubscriptReversal,
(Flake8Comprehensions, "16") => Rule::UnnecessaryComprehension,
(Flake8Comprehensions, "17") => Rule::UnnecessaryMap,
(Flake8Comprehensions, "18") => Rule::UnnecessaryLiteralWithinDictCall,
(Flake8Comprehensions, "19") => Rule::UnnecessaryComprehensionAnyAll,
// flake8-debugger
(Flake8Debugger, "0") => Rule::Debugger,
@@ -507,6 +517,12 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<Rule> {
(Flake8Bandit, "506") => Rule::UnsafeYAMLLoad,
(Flake8Bandit, "508") => Rule::SnmpInsecureVersion,
(Flake8Bandit, "509") => Rule::SnmpWeakCryptography,
(Flake8Bandit, "602") => Rule::SubprocessPopenWithShellEqualsTrue,
(Flake8Bandit, "603") => Rule::SubprocessWithoutShellEqualsTrue,
(Flake8Bandit, "604") => Rule::CallWithShellEqualsTrue,
(Flake8Bandit, "605") => Rule::StartProcessWithAShell,
(Flake8Bandit, "606") => Rule::StartProcessWithNoShell,
(Flake8Bandit, "607") => Rule::StartProcessWithPartialPath,
(Flake8Bandit, "608") => Rule::HardcodedSQLExpression,
(Flake8Bandit, "612") => Rule::LoggingConfigInsecureListen,
(Flake8Bandit, "701") => Rule::Jinja2AutoescapeFalse,
@@ -525,6 +541,8 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<Rule> {
// flake8-import-conventions
(Flake8ImportConventions, "001") => Rule::UnconventionalImportAlias,
(Flake8ImportConventions, "002") => Rule::BannedImportAlias,
(Flake8ImportConventions, "003") => Rule::BannedImportFrom,
// flake8-datetimez
(Flake8Datetimez, "001") => Rule::CallDatetimeWithoutTzinfo,
@@ -573,6 +591,7 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<Rule> {
(Flake8Pyi, "012") => Rule::PassInClassBody,
(Flake8Pyi, "014") => Rule::ArgumentDefaultInStub,
(Flake8Pyi, "015") => Rule::AssignmentDefaultInStub,
(Flake8Pyi, "016") => Rule::DuplicateUnionMember,
(Flake8Pyi, "021") => Rule::DocstringInStub,
(Flake8Pyi, "033") => Rule::TypeCommentInStub,
@@ -608,7 +627,6 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<Rule> {
(Flake8Pie, "794") => Rule::DuplicateClassFieldDefinition,
(Flake8Pie, "796") => Rule::NonUniqueEnums,
(Flake8Pie, "800") => Rule::UnnecessarySpread,
(Flake8Pie, "802") => Rule::UnnecessaryComprehensionAnyAll,
(Flake8Pie, "804") => Rule::UnnecessaryDictKwargs,
(Flake8Pie, "807") => Rule::ReimplementedListBuiltin,
(Flake8Pie, "810") => Rule::MultipleStartsEndsWith,
@@ -700,6 +718,8 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<Rule> {
(Ruff, "005") => Rule::CollectionLiteralConcatenation,
(Ruff, "006") => Rule::AsyncioDanglingTask,
(Ruff, "007") => Rule::PairwiseOverZipped,
(Ruff, "008") => Rule::MutableDataclassDefault,
(Ruff, "009") => Rule::FunctionCallInDataclassDefaultArgument,
(Ruff, "100") => Rule::UnusedNOQA,
// flake8-django

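The `Debug` impl added to `NoqaCode` above delegates to `Display`, so `{:?}` prints the joined code rather than a two-field tuple, which keeps test snapshots readable. The same pattern in isolation (`Code` is a stand-in type):

use std::fmt;

struct Code(&'static str, &'static str);

impl fmt::Display for Code {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}{}", self.0, self.1)
    }
}

impl fmt::Debug for Code {
    // Delegate so debug output matches display output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}

fn main() {
    assert_eq!(format!("{:?}", Code("PLE", "0302")), "PLE0302");
}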

@@ -1,16 +1,18 @@
//! Extract `# noqa` and `# isort: skip` directives from tokenized source.
use crate::noqa::NoqaMapping;
use bitflags::bitflags;
use nohash_hasher::{IntMap, IntSet};
use rustpython_parser::ast::Location;
use ruff_python_ast::source_code::{Indexer, Locator};
use ruff_text_size::{TextLen, TextRange, TextSize};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
use crate::settings::Settings;
bitflags! {
pub struct Flags: u32 {
const NOQA = 0b0000_0001;
#[derive(Debug, Copy, Clone)]
pub struct Flags: u8 {
const NOQA = 0b0000_0001;
const ISORT = 0b0000_0010;
}
}
@@ -29,27 +31,50 @@ impl Flags {
}
}
#[derive(Default)]
#[derive(Default, Debug)]
pub struct IsortDirectives {
pub exclusions: IntSet<usize>,
pub splits: Vec<usize>,
/// Ranges for which sorting is disabled.
pub exclusions: Vec<TextRange>,
/// Text positions at which splits should be inserted.
pub splits: Vec<TextSize>,
pub skip_file: bool,
}
impl IsortDirectives {
pub fn is_excluded(&self, offset: TextSize) -> bool {
for range in &self.exclusions {
if range.contains(offset) {
return true;
}
if range.start() > offset {
break;
}
}
false
}
}
pub struct Directives {
pub noqa_line_for: IntMap<usize, usize>,
pub noqa_line_for: NoqaMapping,
pub isort: IsortDirectives,
}
pub fn extract_directives(lxr: &[LexResult], flags: Flags) -> Directives {
pub fn extract_directives(
lxr: &[LexResult],
flags: Flags,
locator: &Locator,
indexer: &Indexer,
) -> Directives {
Directives {
noqa_line_for: if flags.contains(Flags::NOQA) {
extract_noqa_line_for(lxr)
extract_noqa_line_for(lxr, locator, indexer)
} else {
IntMap::default()
NoqaMapping::default()
},
isort: if flags.contains(Flags::ISORT) {
extract_isort_directives(lxr)
extract_isort_directives(lxr, locator)
} else {
IsortDirectives::default()
},
@@ -57,48 +82,92 @@ pub fn extract_directives(lxr: &[LexResult], flags: Flags) -> Directives {
}
/// Extract a mapping from logical line to noqa line.
pub fn extract_noqa_line_for(lxr: &[LexResult]) -> IntMap<usize, usize> {
let mut noqa_line_for: IntMap<usize, usize> = IntMap::default();
let mut prev_non_newline: Option<(&Location, &Tok, &Location)> = None;
for (start, tok, end) in lxr.iter().flatten() {
if matches!(tok, Tok::EndOfFile) {
break;
}
// For multi-line strings, we expect `noqa` directives on the last line of the
// string.
if matches!(tok, Tok::String { .. }) && end.row() > start.row() {
for i in start.row()..end.row() {
noqa_line_for.insert(i, end.row());
pub fn extract_noqa_line_for(
lxr: &[LexResult],
locator: &Locator,
indexer: &Indexer,
) -> NoqaMapping {
let mut string_mappings = Vec::new();
for (tok, range) in lxr.iter().flatten() {
match tok {
Tok::EndOfFile => {
break;
}
}
// For continuations, we expect `noqa` directives on the last line of the
// continuation.
if matches!(
tok,
Tok::Newline | Tok::NonLogicalNewline | Tok::Comment(..)
) {
if let Some((.., end)) = prev_non_newline {
for i in end.row()..start.row() {
noqa_line_for.insert(i, start.row());
// For multi-line strings, we expect `noqa` directives on the last line of the
// string.
Tok::String {
triple_quoted: true,
..
} => {
if locator.contains_line_break(*range) {
string_mappings.push(*range);
}
}
prev_non_newline = None;
} else if prev_non_newline.is_none() {
prev_non_newline = Some((start, tok, end));
_ => {}
}
}
noqa_line_for
let mut continuation_mappings = Vec::new();
// For continuations, we expect `noqa` directives on the last line of the
// continuation.
let mut last: Option<TextRange> = None;
for continuation_line in indexer.continuation_line_starts() {
let line_end = locator.full_line_end(*continuation_line);
if let Some(last_range) = last.take() {
if last_range.end() == *continuation_line {
last = Some(TextRange::new(last_range.start(), line_end));
continue;
}
// new continuation
continuation_mappings.push(last_range);
}
last = Some(TextRange::new(*continuation_line, line_end));
}
if let Some(last_range) = last.take() {
continuation_mappings.push(last_range);
}
// Merge the mappings in sorted order
let mut mappings =
NoqaMapping::with_capacity(continuation_mappings.len() + string_mappings.len());
let mut continuation_mappings = continuation_mappings.into_iter().peekable();
let mut string_mappings = string_mappings.into_iter().peekable();
while let (Some(continuation), Some(string)) =
(continuation_mappings.peek(), string_mappings.peek())
{
if continuation.start() <= string.start() {
mappings.push_mapping(continuation_mappings.next().unwrap());
} else {
mappings.push_mapping(string_mappings.next().unwrap());
}
}
for mapping in continuation_mappings {
mappings.push_mapping(mapping);
}
for mapping in string_mappings {
mappings.push_mapping(mapping);
}
mappings
}
/// Extract a set of lines over which to disable isort.
pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
let mut exclusions: IntSet<usize> = IntSet::default();
let mut splits: Vec<usize> = Vec::default();
let mut off: Option<Location> = None;
let mut last: Option<Location> = None;
for &(start, ref tok, end) in lxr.iter().flatten() {
last = Some(end);
/// Extract a set of ranges over which to disable isort.
pub fn extract_isort_directives(lxr: &[LexResult], locator: &Locator) -> IsortDirectives {
let mut exclusions: Vec<TextRange> = Vec::default();
let mut splits: Vec<TextSize> = Vec::default();
let mut off: Option<TextSize> = None;
for &(ref tok, range) in lxr.iter().flatten() {
let Tok::Comment(comment_text) = tok else {
continue;
};
@@ -108,7 +177,7 @@ pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
// required to include the space, and must appear on their own lines.
let comment_text = comment_text.trim_end();
if matches!(comment_text, "# isort: split" | "# ruff: isort: split") {
splits.push(start.row());
splits.push(range.start());
} else if matches!(
comment_text,
"# isort: skip_file"
@@ -122,30 +191,25 @@ pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
};
} else if off.is_some() {
if comment_text == "# isort: on" || comment_text == "# ruff: isort: on" {
if let Some(start) = off {
for row in start.row() + 1..=end.row() {
exclusions.insert(row);
}
if let Some(exclusion_start) = off {
exclusions.push(TextRange::new(exclusion_start, range.start()));
}
off = None;
}
} else {
if comment_text.contains("isort: skip") || comment_text.contains("isort:skip") {
exclusions.insert(start.row());
exclusions.push(locator.line_range(range.start()));
} else if comment_text == "# isort: off" || comment_text == "# ruff: isort: off" {
off = Some(start);
off = Some(range.start());
}
}
}
if let Some(start) = off {
// Enforce unterminated `isort: off`.
if let Some(end) = last {
for row in start.row() + 1..=end.row() {
exclusions.insert(row);
}
}
exclusions.push(TextRange::new(start, locator.contents().text_len()));
}
IsortDirectives {
exclusions,
splits,
@@ -155,120 +219,98 @@ pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
#[cfg(test)]
mod tests {
use nohash_hasher::{IntMap, IntSet};
use ruff_python_ast::source_code::{Indexer, Locator};
use ruff_text_size::{TextLen, TextRange, TextSize};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::{lexer, Mode};
use crate::directives::{extract_isort_directives, extract_noqa_line_for};
use crate::noqa::NoqaMapping;
fn noqa_mappings(contents: &str) -> NoqaMapping {
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let locator = Locator::new(contents);
let indexer = Indexer::from_tokens(&lxr, &locator);
extract_noqa_line_for(&lxr, &locator, &indexer)
}
#[test]
fn noqa_extraction() {
let lxr: Vec<LexResult> = lexer::lex(
"x = 1
y = 2
z = x + 1",
Mode::Module,
)
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
let contents = "x = 1
y = 2 \
+ 1
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(
"
assert_eq!(noqa_mappings(contents), NoqaMapping::default());
let contents = "
x = 1
y = 2
z = x + 1",
Mode::Module,
)
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
z = x + 1";
assert_eq!(noqa_mappings(contents), NoqaMapping::default());
let lxr: Vec<LexResult> = lexer::lex(
"x = 1
let contents = "x = 1
y = 2
z = x + 1
",
Mode::Module,
)
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
";
assert_eq!(noqa_mappings(contents), NoqaMapping::default());
let lxr: Vec<LexResult> = lexer::lex(
"x = 1
let contents = "x = 1
y = 2
z = x + 1
",
Mode::Module,
)
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::default());
";
assert_eq!(noqa_mappings(contents), NoqaMapping::default());
let lxr: Vec<LexResult> = lexer::lex(
"x = '''abc
let contents = "x = '''abc
def
ghi
'''
y = 2
z = x + 1",
Mode::Module,
)
.collect();
z = x + 1";
assert_eq!(
extract_noqa_line_for(&lxr),
IntMap::from_iter([(1, 4), (2, 4), (3, 4)])
noqa_mappings(contents),
NoqaMapping::from_iter([TextRange::new(TextSize::from(4), TextSize::from(22)),])
);
let lxr: Vec<LexResult> = lexer::lex(
"x = 1
let contents = "x = 1
y = '''abc
def
ghi
'''
z = 2",
Mode::Module,
)
.collect();
z = 2";
assert_eq!(
extract_noqa_line_for(&lxr),
IntMap::from_iter([(2, 5), (3, 5), (4, 5)])
noqa_mappings(contents),
NoqaMapping::from_iter([TextRange::new(TextSize::from(10), TextSize::from(28))])
);
let lxr: Vec<LexResult> = lexer::lex(
"x = 1
let contents = "x = 1
y = '''abc
def
ghi
'''",
Mode::Module,
)
.collect();
'''";
assert_eq!(
extract_noqa_line_for(&lxr),
IntMap::from_iter([(2, 5), (3, 5), (4, 5)])
noqa_mappings(contents),
NoqaMapping::from_iter([TextRange::new(TextSize::from(10), TextSize::from(28))])
);
let lxr: Vec<LexResult> = lexer::lex(
r#"x = \
1"#,
Mode::Module,
)
.collect();
assert_eq!(extract_noqa_line_for(&lxr), IntMap::from_iter([(1, 2)]));
let contents = r#"x = \
1"#;
assert_eq!(
noqa_mappings(contents),
NoqaMapping::from_iter([TextRange::new(TextSize::from(0), TextSize::from(6))])
);
let lxr: Vec<LexResult> = lexer::lex(
r#"from foo import \
let contents = r#"from foo import \
bar as baz, \
qux as quux"#,
Mode::Module,
)
.collect();
qux as quux"#;
assert_eq!(
extract_noqa_line_for(&lxr),
IntMap::from_iter([(1, 3), (2, 3)])
noqa_mappings(contents),
NoqaMapping::from_iter([TextRange::new(TextSize::from(0), TextSize::from(36))])
);
let lxr: Vec<LexResult> = lexer::lex(
r#"
let contents = r#"
# Foo
from foo import \
bar as baz, \
@@ -276,13 +318,14 @@ from foo import \
x = \
1
y = \
2"#,
Mode::Module,
)
.collect();
2"#;
assert_eq!(
extract_noqa_line_for(&lxr),
IntMap::from_iter([(3, 5), (4, 5), (6, 7), (8, 9)])
noqa_mappings(contents),
NoqaMapping::from_iter([
TextRange::new(TextSize::from(7), TextSize::from(43)),
TextRange::new(TextSize::from(65), TextSize::from(71)),
TextRange::new(TextSize::from(77), TextSize::from(83)),
])
);
}
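The assertions above capture the switch from line-number maps to byte ranges: `extract_noqa_line_for` now reports, for each multi-line statement, the `TextRange` that a `# noqa` comment should cover. A minimal sketch of the lookup such a mapping enables (`noqa_range_for` is a hypothetical helper over plain tuples, not part of this diff):

fn noqa_range_for(offset: usize, mappings: &[(usize, usize)]) -> Option<(usize, usize)> {
    // Find the statement range containing `offset`, if any.
    mappings
        .iter()
        .copied()
        .find(|&(start, end)| start <= offset && offset < end)
}

fn main() {
    // Mirrors the triple-quoted-string case above: the statement spans bytes 4..22.
    let mappings = [(4, 22)];
    assert_eq!(noqa_range_for(10, &mappings), Some((4, 22)));
    assert_eq!(noqa_range_for(25, &mappings), None);
}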
@@ -292,7 +335,10 @@ y = \
y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default());
assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
Vec::default()
);
let contents = "# isort: off
x = 1
@@ -301,8 +347,8 @@ y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(
extract_isort_directives(&lxr).exclusions,
IntSet::from_iter([2, 3, 4])
extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
Vec::from_iter([TextRange::new(TextSize::from(0), TextSize::from(25))])
);
let contents = "# isort: off
@@ -314,8 +360,8 @@ z = x + 1
# isort: on";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(
extract_isort_directives(&lxr).exclusions,
IntSet::from_iter([2, 3, 4, 5])
extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
Vec::from_iter([TextRange::new(TextSize::from(0), TextSize::from(38))])
);
let contents = "# isort: off
@@ -324,8 +370,8 @@ y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(
extract_isort_directives(&lxr).exclusions,
IntSet::from_iter([2, 3, 4])
extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
Vec::from_iter([TextRange::at(TextSize::from(0), contents.text_len())])
);
let contents = "# isort: skip_file
@@ -333,7 +379,10 @@ x = 1
y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default());
assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
Vec::default()
);
let contents = "# isort: off
x = 1
@@ -342,7 +391,10 @@ y = 2
# isort: skip_file
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default());
assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).exclusions,
Vec::default()
);
}
#[test]
@@ -351,19 +403,28 @@ z = x + 1";
y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).splits, Vec::<usize>::new());
assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).splits,
Vec::new()
);
let contents = "x = 1
y = 2
# isort: split
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).splits, vec![3]);
assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).splits,
vec![TextSize::from(12)]
);
let contents = "x = 1
y = 2 # isort: split
z = x + 1";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
assert_eq!(extract_isort_directives(&lxr).splits, vec![2]);
assert_eq!(
extract_isort_directives(&lxr, &Locator::new(contents)).splits,
vec![TextSize::from(13)]
);
}
}
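With exclusions expressed as `TextRange`s rather than `IntSet`s of row numbers, downstream consumers check byte-offset containment instead of line membership. A minimal sketch under that assumption (`is_excluded` is a hypothetical helper, not part of this diff):

use ruff_text_size::{TextRange, TextSize};

/// Does the statement starting at `offset` fall inside any exclusion range?
fn is_excluded(offset: TextSize, exclusions: &[TextRange]) -> bool {
    exclusions.iter().any(|range| range.contains(offset))
}

fn main() {
    // Mirrors the first `# isort: off` test above: bytes 0..25 are excluded.
    let exclusions = vec![TextRange::new(TextSize::from(0), TextSize::from(25))];
    assert!(is_excluded(TextSize::from(13), &exclusions));
    assert!(!is_excluded(TextSize::from(30), &exclusions));
}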

View File

@@ -1,8 +1,10 @@
//! Doc line extraction. In this context, a doc line is a line consisting of a
//! standalone comment or a constant string statement.
use ruff_text_size::{TextRange, TextSize};
use std::iter::FusedIterator;
use ruff_python_ast::source_code::Locator;
use rustpython_parser::ast::{Constant, ExprKind, Stmt, StmtKind, Suite};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
@@ -11,46 +13,56 @@ use ruff_python_ast::visitor;
use ruff_python_ast::visitor::Visitor;
/// Extract doc lines (standalone comments) from a token sequence.
pub fn doc_lines_from_tokens(lxr: &[LexResult]) -> DocLines {
DocLines::new(lxr)
pub fn doc_lines_from_tokens<'a>(lxr: &'a [LexResult], locator: &'a Locator<'a>) -> DocLines<'a> {
DocLines::new(lxr, locator)
}
pub struct DocLines<'a> {
inner: std::iter::Flatten<core::slice::Iter<'a, LexResult>>,
prev: Option<usize>,
locator: &'a Locator<'a>,
prev: TextSize,
}
impl<'a> DocLines<'a> {
fn new(lxr: &'a [LexResult]) -> Self {
fn new(lxr: &'a [LexResult], locator: &'a Locator) -> Self {
Self {
inner: lxr.iter().flatten(),
prev: None,
locator,
prev: TextSize::default(),
}
}
}
impl Iterator for DocLines<'_> {
type Item = usize;
type Item = TextSize;
fn next(&mut self) -> Option<Self::Item> {
let mut at_start_of_line = true;
loop {
let (start, tok, end) = self.inner.next()?;
let (tok, range) = self.inner.next()?;
match tok {
Tok::Indent | Tok::Dedent | Tok::Newline => continue,
Tok::Comment(..) => {
if let Some(prev) = self.prev {
if start.row() > prev {
break Some(start.row());
}
} else {
break Some(start.row());
if at_start_of_line
|| self
.locator
.contains_line_break(TextRange::new(self.prev, range.start()))
{
break Some(range.start());
}
}
_ => {}
Tok::Newline => {
at_start_of_line = true;
}
Tok::Indent | Tok::Dedent => {
// ignore
}
_ => {
at_start_of_line = false;
}
}
self.prev = Some(end.row());
self.prev = range.end();
}
}
}
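The rewritten iterator no longer compares row numbers: a comment is treated as standalone if it is the first token on its line, either because a `Newline` token was just seen or because the source between the previous token's end and the comment's start contains a line break. A simplified sketch of that test, with a plain `&str` standing in for ruff's `Locator`:

fn is_standalone_comment(source: &str, prev_end: usize, comment_start: usize) -> bool {
    // A comment starts its own line iff a line break separates it from the
    // previous token's end.
    source[prev_end..comment_start].contains(|c| c == '\n' || c == '\r')
}

fn main() {
    let source = "x = 1  # trailing\n# standalone\n";
    assert!(!is_standalone_comment(source, 5, 7));   // trailing comment on `x = 1`
    assert!(is_standalone_comment(source, 17, 18));  // comment opens its own line
}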
@@ -59,7 +71,7 @@ impl FusedIterator for DocLines<'_> {}
#[derive(Default)]
struct StringLinesVisitor {
string_lines: Vec<usize>,
string_lines: Vec<TextSize>,
}
impl Visitor<'_> for StringLinesVisitor {
@@ -70,16 +82,15 @@ impl Visitor<'_> for StringLinesVisitor {
..
} = &value.node
{
self.string_lines
.extend(value.location.row()..=value.end_location.unwrap().row());
self.string_lines.push(value.start());
}
}
visitor::walk_stmt(self, stmt);
}
}
/// Extract doc lines (standalone strings) from an AST.
pub fn doc_lines_from_ast(python_ast: &Suite) -> Vec<usize> {
/// Extract the start positions of doc lines (standalone strings) from an AST.
pub fn doc_lines_from_ast(python_ast: &Suite) -> Vec<TextSize> {
let mut visitor = StringLinesVisitor::default();
visitor.visit_body(python_ast);
visitor.string_lines

View File

@@ -1,4 +1,7 @@
use ruff_text_size::{TextRange, TextSize};
use rustpython_parser::ast::{Expr, Stmt};
use std::fmt::{Debug, Formatter};
use std::ops::Deref;
use ruff_python_semantic::analyze::visibility::{
class_visibility, function_visibility, method_visibility, Modifier, Visibility, VisibleScope,
@@ -25,11 +28,78 @@ pub struct Definition<'a> {
pub struct Docstring<'a> {
pub kind: DefinitionKind<'a>,
pub expr: &'a Expr,
/// The content of the docstring, including the leading and trailing quotes.
pub contents: &'a str,
pub body: &'a str,
/// The range of the docstring body (without the quotes). The range is relative to [`Self::contents`].
pub body_range: TextRange,
pub indentation: &'a str,
}
impl<'a> Docstring<'a> {
pub fn body(&self) -> DocstringBody {
DocstringBody { docstring: self }
}
pub const fn start(&self) -> TextSize {
self.expr.start()
}
pub const fn end(&self) -> TextSize {
self.expr.end()
}
pub const fn range(&self) -> TextRange {
self.expr.range()
}
pub fn leading_quote(&self) -> &'a str {
&self.contents[TextRange::up_to(self.body_range.start())]
}
}
#[derive(Copy, Clone)]
pub struct DocstringBody<'a> {
docstring: &'a Docstring<'a>,
}
impl<'a> DocstringBody<'a> {
#[inline]
pub fn start(self) -> TextSize {
self.range().start()
}
#[inline]
pub fn end(self) -> TextSize {
self.range().end()
}
pub fn range(self) -> TextRange {
self.docstring.body_range + self.docstring.start()
}
pub fn as_str(self) -> &'a str {
&self.docstring.contents[self.docstring.body_range]
}
}
impl Deref for DocstringBody<'_> {
type Target = str;
fn deref(&self) -> &Self::Target {
self.as_str()
}
}
impl Debug for DocstringBody<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DocstringBody")
.field("text", &self.as_str())
.field("range", &self.range())
.finish()
}
}
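Because `body_range` is stored relative to `contents`, converting to an absolute position is a single offset addition, exactly as `DocstringBody::range` does above. A small sketch of the arithmetic, assuming the `ruff_text_size` crate:

use ruff_text_size::{TextRange, TextSize};

fn main() {
    // A docstring whose `contents` are `"""Do x."""`, starting at byte 10 of
    // the file. The body `Do x.` occupies bytes 3..8 of `contents`.
    let docstring_start = TextSize::from(10);
    let relative_body = TextRange::new(TextSize::from(3), TextSize::from(8));
    // Shifting the relative range by the docstring's start yields the
    // absolute range of the body within the file.
    let absolute_body = relative_body + docstring_start;
    assert_eq!(absolute_body, TextRange::new(TextSize::from(13), TextSize::from(18)));
}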
#[derive(Copy, Clone)]
pub enum Documentable {
Class,

View File

@@ -26,6 +26,8 @@ pub(crate) static GOOGLE_SECTIONS: &[SectionKind] = &[
SectionKind::KeywordArguments,
SectionKind::Note,
SectionKind::Notes,
SectionKind::OtherArgs,
SectionKind::OtherArguments,
SectionKind::Return,
SectionKind::Tip,
SectionKind::Todo,

View File

@@ -14,6 +14,7 @@ pub(crate) static NUMPY_SECTIONS: &[SectionKind] = &[
SectionKind::Yields,
// NumPy-only
SectionKind::ExtendedSummary,
SectionKind::OtherParams,
SectionKind::OtherParameters,
SectionKind::Parameters,
SectionKind::ShortSummary,

View File

@@ -1,5 +1,10 @@
use ruff_python_ast::newlines::{StrExt, UniversalNewlineIterator};
use ruff_text_size::{TextLen, TextRange, TextSize};
use std::fmt::{Debug, Formatter};
use std::iter::FusedIterator;
use strum_macros::EnumIter;
use crate::docstrings::definition::{Docstring, DocstringBody};
use crate::docstrings::styles::SectionStyle;
use ruff_python_ast::whitespace;
@@ -22,6 +27,9 @@ pub enum SectionKind {
Methods,
Note,
Notes,
OtherArgs,
OtherArguments,
OtherParams,
OtherParameters,
Parameters,
Raises,
@@ -59,6 +67,9 @@ impl SectionKind {
"methods" => Some(Self::Methods),
"note" => Some(Self::Note),
"notes" => Some(Self::Notes),
"other args" => Some(Self::OtherArgs),
"other arguments" => Some(Self::OtherArguments),
"other params" => Some(Self::OtherParams),
"other parameters" => Some(Self::OtherParameters),
"parameters" => Some(Self::Parameters),
"raises" => Some(Self::Raises),
@@ -97,6 +108,9 @@ impl SectionKind {
Self::Methods => "Methods",
Self::Note => "Note",
Self::Notes => "Notes",
Self::OtherArgs => "Other Args",
Self::OtherArguments => "Other Arguments",
Self::OtherParams => "Other Params",
Self::OtherParameters => "Other Parameters",
Self::Parameters => "Parameters",
Self::Raises => "Raises",
@@ -116,17 +130,259 @@ impl SectionKind {
}
}
pub(crate) struct SectionContexts<'a> {
contexts: Vec<SectionContextData>,
docstring: &'a Docstring<'a>,
}
impl<'a> SectionContexts<'a> {
/// Extract all `SectionContext` values from a docstring.
pub fn from_docstring(docstring: &'a Docstring<'a>, style: SectionStyle) -> Self {
let contents = docstring.body();
let mut contexts = Vec::new();
let mut last: Option<SectionContextData> = None;
let mut previous_line = None;
for line in contents.universal_newlines() {
if previous_line.is_none() {
// skip the first line
previous_line = Some(line.as_str());
continue;
}
if let Some(section_kind) = suspected_as_section(&line, style) {
let indent = whitespace::leading_space(&line);
let section_name = whitespace::leading_words(&line);
let section_name_range = TextRange::at(indent.text_len(), section_name.text_len());
if is_docstring_section(
&line,
section_name_range,
previous_line.unwrap_or_default(),
) {
if let Some(mut last) = last.take() {
last.range = TextRange::new(last.range.start(), line.start());
contexts.push(last);
}
last = Some(SectionContextData {
kind: section_kind,
name_range: section_name_range + line.start(),
range: TextRange::empty(line.start()),
summary_full_end: line.full_end(),
});
}
}
previous_line = Some(line.as_str());
}
if let Some(mut last) = last.take() {
last.range = TextRange::new(last.range.start(), contents.text_len());
contexts.push(last);
}
Self {
contexts,
docstring,
}
}
pub fn len(&self) -> usize {
self.contexts.len()
}
pub fn iter(&self) -> SectionContextsIter {
SectionContextsIter {
docstring_body: self.docstring.body(),
inner: self.contexts.iter(),
}
}
}
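The loop above closes each section lazily: a suspected header opens a `SectionContextData` whose range starts empty, and that range is completed when the next header (or the end of the body) is reached. A simplified sketch of the pattern over line indices instead of byte ranges (header detection reduced to a fixed list; the real code consults `SectionStyle`):

fn section_spans(lines: &[&str]) -> Vec<(usize, usize)> {
    let headers = ["Args", "Returns", "Raises"];
    let mut spans: Vec<(usize, usize)> = Vec::new();
    let mut open: Option<usize> = None;
    // Skip the first line: the summary line can never be a section header.
    for (i, line) in lines.iter().enumerate().skip(1) {
        if headers.contains(&line.trim()) {
            // A new header closes the previously opened section.
            if let Some(start) = open.take() {
                spans.push((start, i));
            }
            open = Some(i);
        }
    }
    // The end of input closes the last section.
    if let Some(start) = open {
        spans.push((start, lines.len()));
    }
    spans
}

fn main() {
    let lines = ["Summary.", "", "Args", "    x: int", "Returns", "    int"];
    assert_eq!(section_spans(&lines), vec![(2, 4), (4, 6)]);
}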
impl<'a> IntoIterator for &'a SectionContexts<'a> {
type Item = SectionContext<'a>;
type IntoIter = SectionContextsIter<'a>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl Debug for SectionContexts<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_list().entries(self.iter()).finish()
}
}
pub struct SectionContextsIter<'a> {
docstring_body: DocstringBody<'a>,
inner: std::slice::Iter<'a, SectionContextData>,
}
impl<'a> Iterator for SectionContextsIter<'a> {
type Item = SectionContext<'a>;
fn next(&mut self) -> Option<Self::Item> {
let next = self.inner.next()?;
Some(SectionContext {
data: next,
docstring_body: self.docstring_body,
})
}
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
}
impl<'a> DoubleEndedIterator for SectionContextsIter<'a> {
fn next_back(&mut self) -> Option<Self::Item> {
let back = self.inner.next_back()?;
Some(SectionContext {
data: back,
docstring_body: self.docstring_body,
})
}
}
impl FusedIterator for SectionContextsIter<'_> {}
impl ExactSizeIterator for SectionContextsIter<'_> {}
#[derive(Debug)]
pub(crate) struct SectionContext<'a> {
/// The "kind" of the section, e.g. "SectionKind::Args" or "SectionKind::Returns".
pub(crate) kind: SectionKind,
struct SectionContextData {
kind: SectionKind,
/// Range of the section name, relative to the [`Docstring::body`]
name_range: TextRange,
/// Range from the start to the end of the section, relative to the [`Docstring::body`]
range: TextRange,
/// End of the summary, relative to the [`Docstring::body`]
summary_full_end: TextSize,
}
pub struct SectionContext<'a> {
data: &'a SectionContextData,
docstring_body: DocstringBody<'a>,
}
impl<'a> SectionContext<'a> {
pub fn is_last(&self) -> bool {
self.range().end() == self.docstring_body.end()
}
/// The `kind` of the section, e.g. [`SectionKind::Args`] or [`SectionKind::Returns`].
pub const fn kind(&self) -> SectionKind {
self.data.kind
}
/// The name of the section as it appears in the docstring, e.g. "Args" or "Returns".
pub(crate) section_name: &'a str,
pub(crate) previous_line: &'a str,
pub(crate) line: &'a str,
pub(crate) following_lines: &'a [&'a str],
pub(crate) is_last_section: bool,
pub(crate) original_index: usize,
pub fn section_name(&self) -> &'a str {
&self.docstring_body.as_str()[self.data.name_range]
}
/// Returns the rest of the summary line after the section name.
pub fn summary_after_section_name(&self) -> &'a str {
&self.summary_line()[usize::from(self.data.name_range.end() - self.data.range.start())..]
}
fn offset(&self) -> TextSize {
self.docstring_body.start()
}
/// The absolute range of the section name
pub fn section_name_range(&self) -> TextRange {
self.data.name_range + self.offset()
}
/// The absolute range of the summary line, including the trailing newline character.
pub fn summary_full_range(&self) -> TextRange {
self.summary_full_range_relative() + self.offset()
}
/// The absolute range of the summary line, excluding any trailing newline character.
pub fn summary_range(&self) -> TextRange {
TextRange::at(self.range().start(), self.summary_line().text_len())
}
/// Range of the summary line relative to [`Docstring::body`], including the trailing newline character.
fn summary_full_range_relative(&self) -> TextRange {
TextRange::new(self.range_relative().start(), self.data.summary_full_end)
}
/// Returns the range of this section relative to [`Docstring::body`]
const fn range_relative(&self) -> TextRange {
self.data.range
}
/// The absolute range of the full section.
pub fn range(&self) -> TextRange {
self.range_relative() + self.offset()
}
/// The summary line, without any trailing newline characters.
pub fn summary_line(&self) -> &'a str {
let full_summary = &self.docstring_body.as_str()[self.summary_full_range_relative()];
let mut bytes = full_summary.bytes().rev();
let newline_width = match bytes.next() {
Some(b'\n') => {
if bytes.next() == Some(b'\r') {
2
} else {
1
}
}
Some(b'\r') => 1,
_ => 0,
};
&full_summary[..full_summary.len() - newline_width]
}
/// Returns the text of the last line of the previous section, or `None` if this is the first section.
pub fn previous_line(&self) -> Option<&'a str> {
let previous =
&self.docstring_body.as_str()[TextRange::up_to(self.range_relative().start())];
previous.universal_newlines().last().map(|l| l.as_str())
}
/// Returns the lines belonging to this section after the summary line.
pub fn following_lines(&self) -> UniversalNewlineIterator<'a> {
let lines = self.following_lines_str();
UniversalNewlineIterator::with_offset(lines, self.offset() + self.data.summary_full_end)
}
fn following_lines_str(&self) -> &'a str {
&self.docstring_body.as_str()[self.following_range_relative()]
}
/// Returns the range to the following lines relative to [`Docstring::body`].
const fn following_range_relative(&self) -> TextRange {
TextRange::new(self.data.summary_full_end, self.range_relative().end())
}
/// Returns the absolute range of the following lines.
pub fn following_range(&self) -> TextRange {
self.following_range_relative() + self.offset()
}
}
impl Debug for SectionContext<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("SectionContext")
.field("kind", &self.kind())
.field("section_name", &self.section_name())
.field("summary_line", &self.summary_line())
.field("following_lines", &&self.following_lines_str())
.finish()
}
}
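`summary_line` trims the trailing newline without allocating by inspecting at most the last two bytes. The same logic, extracted as a standalone sketch:

/// How many bytes of trailing `\r\n`, `\n`, or lone `\r` to strip from a line.
fn trailing_newline_width(line: &str) -> usize {
    let mut bytes = line.bytes().rev();
    match bytes.next() {
        Some(b'\n') => {
            if bytes.next() == Some(b'\r') {
                2
            } else {
                1
            }
        }
        Some(b'\r') => 1,
        _ => 0,
    }
}

fn main() {
    assert_eq!(trailing_newline_width("Args:\r\n"), 2);
    assert_eq!(trailing_newline_width("Args:\n"), 1);
    assert_eq!(trailing_newline_width("Args:"), 0);
}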
fn suspected_as_section(line: &str, style: SectionStyle) -> Option<SectionKind> {
@@ -139,20 +395,15 @@ fn suspected_as_section(line: &str, style: SectionStyle) -> Option<SectionKind>
}
/// Check if the suspected context is really a section header.
fn is_docstring_section(context: &SectionContext) -> bool {
let section_name_suffix = context
.line
.trim()
.strip_prefix(context.section_name)
.unwrap()
.trim();
fn is_docstring_section(line: &str, section_name_range: TextRange, previous_lines: &str) -> bool {
let section_name_suffix = line[usize::from(section_name_range.end())..].trim();
let this_looks_like_a_section_name =
section_name_suffix == ":" || section_name_suffix.is_empty();
if !this_looks_like_a_section_name {
return false;
}
let prev_line = context.previous_line.trim();
let prev_line = previous_lines.trim();
let prev_line_ends_with_punctuation = [',', ';', '.', '-', '\\', '/', ']', '}', ')']
.into_iter()
.any(|char| prev_line.ends_with(char));
@@ -164,50 +415,3 @@ fn is_docstring_section(context: &SectionContext) -> bool {
true
}
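A condensed version of the two-part heuristic above: the suspected header line must carry nothing after the section name except an optional colon, and the previous line must not end in punctuation suggesting the text merely continues a sentence. A sketch under those assumptions (`looks_like_section_header` is a hypothetical helper, simplified from the real signature):

fn looks_like_section_header(line: &str, section_name: &str, previous_line: &str) -> bool {
    let Some(suffix) = line.trim().strip_prefix(section_name) else {
        return false;
    };
    // Only an optional trailing colon may follow the section name.
    let suffix = suffix.trim();
    if !(suffix.is_empty() || suffix == ":") {
        return false;
    }
    // A previous line ending in continuation punctuation suggests prose, not a header.
    let prev = previous_line.trim();
    !([',', ';', '.', '-', '\\', '/', ']', '}', ')']
        .into_iter()
        .any(|c| prev.ends_with(c)))
}

fn main() {
    assert!(looks_like_section_header("Returns:", "Returns", "Computes a sum"));
    assert!(!looks_like_section_header("Returns the sum.", "Returns", ""));
    assert!(!looks_like_section_header("Returns", "Returns", "a, b, c -"));
}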
/// Extract all `SectionContext` values from a docstring.
pub(crate) fn section_contexts<'a>(
lines: &'a [&'a str],
style: SectionStyle,
) -> Vec<SectionContext<'a>> {
let mut contexts = vec![];
for (kind, lineno) in lines
.iter()
.enumerate()
.skip(1)
.filter_map(|(lineno, line)| suspected_as_section(line, style).map(|kind| (kind, lineno)))
{
let context = SectionContext {
kind,
section_name: whitespace::leading_words(lines[lineno]),
previous_line: lines[lineno - 1],
line: lines[lineno],
following_lines: &lines[lineno + 1..],
original_index: lineno,
is_last_section: false,
};
if is_docstring_section(&context) {
contexts.push(context);
}
}
let mut truncated_contexts = Vec::with_capacity(contexts.len());
let mut end: Option<usize> = None;
for context in contexts.into_iter().rev() {
let next_end = context.original_index;
truncated_contexts.push(SectionContext {
kind: context.kind,
section_name: context.section_name,
previous_line: context.previous_line,
line: context.line,
following_lines: end.map_or(context.following_lines, |end| {
&lines[context.original_index + 1..end]
}),
original_index: context.original_index,
is_last_section: end.is_none(),
});
end = Some(next_end);
}
truncated_contexts.reverse();
truncated_contexts
}

View File

@@ -1,36 +1,21 @@
use std::path::{Path, PathBuf};
use anyhow::{anyhow, Result};
use globset::GlobMatcher;
use log::debug;
use path_absolutize::{path_dedot, Absolutize};
use crate::registry::RuleSet;
/// Extract the absolute path and basename (as strings) from a Path.
pub fn extract_path_names(path: &Path) -> Result<(&str, &str)> {
let file_path = path
.to_str()
.ok_or_else(|| anyhow!("Unable to parse filename: {:?}", path))?;
let file_basename = path
.file_name()
.ok_or_else(|| anyhow!("Unable to parse filename: {:?}", path))?
.to_str()
.ok_or_else(|| anyhow!("Unable to parse filename: {:?}", path))?;
Ok((file_path, file_basename))
}
/// Create a set with codes matching the pattern/code pairs.
pub(crate) fn ignores_from_path(
path: &Path,
pattern_code_pairs: &[(GlobMatcher, GlobMatcher, RuleSet)],
) -> RuleSet {
let (file_path, file_basename) = extract_path_names(path).expect("Unable to parse filename");
let file_name = path.file_name().expect("Unable to parse filename");
pattern_code_pairs
.iter()
.filter_map(|(absolute, basename, rules)| {
if basename.is_match(file_basename) {
if basename.is_match(file_name) {
debug!(
"Adding per-file ignores for {:?} due to basename match on {:?}: {:?}",
path,
@@ -38,7 +23,7 @@ pub(crate) fn ignores_from_path(
rules
);
Some(rules)
} else if absolute.is_match(file_path) {
} else if absolute.is_match(path) {
debug!(
"Adding per-file ignores for {:?} due to absolute match on {:?}: {:?}",
path,
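The rewrite above drops the string extraction entirely: `GlobMatcher::is_match` accepts any `AsRef<Path>`, so the basename matcher can take the `OsStr` file name and the absolute matcher the `Path` itself. A minimal sketch of that usage, assuming the `globset` crate (the glob patterns here are illustrative):

use globset::Glob;
use std::path::Path;

fn main() {
    let basename = Glob::new("test_*.py").unwrap().compile_matcher();
    let absolute = Glob::new("**/migrations/*.py").unwrap().compile_matcher();

    let path = Path::new("/repo/app/migrations/0001_init.py");
    // `is_match` takes `impl AsRef<Path>`, so no lossy `to_str` conversion is needed.
    assert!(!basename.is_match(path.file_name().unwrap()));
    assert!(absolute.is_match(path));
}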

View File

@@ -2,8 +2,9 @@
use anyhow::Result;
use libcst_native::{Codegen, CodegenState, ImportAlias, Name, NameOrAttribute};
use ruff_text_size::TextSize;
use rustc_hash::FxHashMap;
use rustpython_parser::ast::{Location, Stmt, StmtKind, Suite};
use rustpython_parser::ast::{Stmt, StmtKind, Suite};
use rustpython_parser::{lexer, Mode, Tok};
use ruff_diagnostics::Edit;
@@ -95,7 +96,7 @@ impl<'a> Importer<'a> {
/// Add the given member to an existing `StmtKind::ImportFrom` statement.
pub fn add_member(&self, stmt: &Stmt, member: &str) -> Result<Edit> {
let mut tree = match_module(self.locator.slice(stmt))?;
let mut tree = match_module(self.locator.slice(stmt.range()))?;
let import_from = match_import_from(&mut tree)?;
let aliases = match_aliases(import_from)?;
aliases.push(ImportAlias {
@@ -113,11 +114,7 @@ impl<'a> Importer<'a> {
..CodegenState::default()
};
tree.codegen(&mut state);
Ok(Edit::replacement(
state.to_string(),
stmt.location,
stmt.end_location.unwrap(),
))
Ok(Edit::range_replacement(state.to_string(), stmt.range()))
}
}
@@ -126,13 +123,13 @@ struct Insertion {
/// The content to add before the insertion.
prefix: &'static str,
/// The location at which to insert.
location: Location,
location: TextSize,
/// The content to add after the insertion.
suffix: &'static str,
}
impl Insertion {
fn new(prefix: &'static str, location: Location, suffix: &'static str) -> Self {
fn new(prefix: &'static str, location: TextSize, suffix: &'static str) -> Self {
Self {
prefix,
location,
@@ -142,7 +139,7 @@ impl Insertion {
}
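The `Insertion` above now records a flat byte offset rather than a (row, column) `Location`. A self-contained sketch of how such an insertion would be materialized (local copy of the struct shape; `apply` is a hypothetical helper, not part of this diff):

use ruff_text_size::TextSize;

struct Insertion {
    prefix: &'static str,
    location: TextSize,
    suffix: &'static str,
}

fn apply(source: &str, content: &str, insertion: &Insertion) -> String {
    // Splice `prefix + content + suffix` into the source at the byte offset.
    let at = usize::from(insertion.location);
    format!(
        "{}{}{}{}{}",
        &source[..at],
        insertion.prefix,
        content,
        insertion.suffix,
        &source[at..]
    )
}

fn main() {
    let source = "\"\"\"Docstring.\"\"\";x = 1\n";
    // Insert after the semicolon at byte 16, i.e. at offset 17, as an inline statement.
    let insertion = Insertion {
        prefix: " ",
        location: TextSize::from(17),
        suffix: ";",
    };
    assert_eq!(
        apply(source, "import os", &insertion),
        "\"\"\"Docstring.\"\"\"; import os;x = 1\n"
    );
}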
/// Find the end of the last docstring.
fn match_docstring_end(body: &[Stmt]) -> Option<Location> {
fn match_docstring_end(body: &[Stmt]) -> Option<TextSize> {
let mut iter = body.iter();
let Some(mut stmt) = iter.next() else {
return None;
@@ -156,7 +153,7 @@ fn match_docstring_end(body: &[Stmt]) -> Option<Location> {
}
stmt = next;
}
Some(stmt.end_location.unwrap())
Some(stmt.end())
}
/// Find the location at which a "top-of-file" import should be inserted,
@@ -173,17 +170,17 @@ fn match_docstring_end(body: &[Stmt]) -> Option<Location> {
/// The location returned will be the start of the `import os` statement,
/// along with a trailing newline suffix.
fn end_of_statement_insertion(stmt: &Stmt, locator: &Locator, stylist: &Stylist) -> Insertion {
let location = stmt.end_location.unwrap();
let mut tokens = lexer::lex_located(locator.skip(location), Mode::Module, location).flatten();
if let Some((.., Tok::Semi, end)) = tokens.next() {
let location = stmt.end();
let mut tokens = lexer::lex_located(locator.after(location), Mode::Module, location).flatten();
if let Some((Tok::Semi, range)) = tokens.next() {
// If the first token after the docstring is a semicolon, insert after the semicolon as an
// inline statement.
Insertion::new(" ", end, ";")
Insertion::new(" ", range.end(), ";")
} else {
// Otherwise, insert on the next line.
Insertion::new(
"",
Location::new(location.row() + 1, 0),
locator.line_end(location),
stylist.line_ending().as_str(),
)
}
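Both branches above now compute positions directly from byte offsets: `locator.line_end(location)` replaces the row-arithmetic `Location::new(location.row() + 1, 0)`. A plain-`&str` sketch of the line-end computation (a hypothetical helper approximating `Locator::full_line_end`):

fn full_line_end(source: &str, offset: usize) -> usize {
    // One past the terminating newline of the line containing `offset`,
    // or the end of the source for an unterminated last line.
    source[offset..]
        .find('\n')
        .map(|i| offset + i + 1)
        .unwrap_or(source.len())
}

fn main() {
    let source = "\"\"\"Doc.\"\"\"\nimport os\n";
    assert_eq!(full_line_end(source, 0), 11);  // past the docstring line
    assert_eq!(full_line_end(source, 11), 21); // past `import os\n`
}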
@@ -207,25 +204,25 @@ fn top_of_file_insertion(body: &[Stmt], locator: &Locator, stylist: &Stylist) ->
let mut location = if let Some(location) = match_docstring_end(body) {
// If the first token after the docstring is a semicolon, insert after the semicolon as an
// inline statement.
let first_token = lexer::lex_located(locator.skip(location), Mode::Module, location)
let first_token = lexer::lex_located(locator.after(location), Mode::Module, location)
.flatten()
.next();
if let Some((.., Tok::Semi, end)) = first_token {
return Insertion::new(" ", end, ";");
if let Some((Tok::Semi, range)) = first_token {
return Insertion::new(" ", range.end(), ";");
}
// Otherwise, advance to the next row.
Location::new(location.row() + 1, 0)
locator.full_line_end(location)
} else {
Location::default()
TextSize::default()
};
// Skip over any comments and empty lines.
for (.., tok, end) in
lexer::lex_located(locator.skip(location), Mode::Module, location).flatten()
for (tok, range) in
lexer::lex_located(locator.after(location), Mode::Module, location).flatten()
{
if matches!(tok, Tok::Comment(..) | Tok::Newline) {
location = Location::new(end.row() + 1, 0);
location = locator.full_line_end(range.end());
} else {
break;
}
@@ -237,11 +234,12 @@ fn top_of_file_insertion(body: &[Stmt], locator: &Locator, stylist: &Stylist) ->
#[cfg(test)]
mod tests {
use anyhow::Result;
use ruff_python_ast::newlines::LineEnding;
use ruff_text_size::TextSize;
use rustpython_parser as parser;
use rustpython_parser::ast::Location;
use rustpython_parser::lexer::LexResult;
use ruff_python_ast::source_code::{LineEnding, Locator, Stylist};
use ruff_python_ast::source_code::{Locator, Stylist};
use crate::importer::{top_of_file_insertion, Insertion};
@@ -258,7 +256,7 @@ mod tests {
let contents = "";
assert_eq!(
insert(contents)?,
Insertion::new("", Location::new(1, 0), LineEnding::default().as_str())
Insertion::new("", TextSize::from(0), LineEnding::default().as_str())
);
let contents = r#"
@@ -266,7 +264,7 @@ mod tests {
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", Location::new(2, 0), LineEnding::default().as_str())
Insertion::new("", TextSize::from(19), LineEnding::default().as_str())
);
let contents = r#"
@@ -275,7 +273,7 @@ mod tests {
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", Location::new(2, 0), "\n")
Insertion::new("", TextSize::from(20), "\n")
);
let contents = r#"
@@ -285,7 +283,7 @@ mod tests {
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", Location::new(3, 0), "\n")
Insertion::new("", TextSize::from(40), "\n")
);
let contents = r#"
@@ -294,7 +292,7 @@ x = 1
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", Location::new(1, 0), "\n")
Insertion::new("", TextSize::from(0), "\n")
);
let contents = r#"
@@ -303,7 +301,7 @@ x = 1
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", Location::new(2, 0), "\n")
Insertion::new("", TextSize::from(23), "\n")
);
let contents = r#"
@@ -313,7 +311,7 @@ x = 1
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", Location::new(3, 0), "\n")
Insertion::new("", TextSize::from(43), "\n")
);
let contents = r#"
@@ -323,7 +321,7 @@ x = 1
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", Location::new(3, 0), "\n")
Insertion::new("", TextSize::from(43), "\n")
);
let contents = r#"
@@ -332,7 +330,7 @@ x = 1
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", Location::new(1, 0), "\n")
Insertion::new("", TextSize::from(0), "\n")
);
let contents = r#"
@@ -341,7 +339,7 @@ x = 1
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new(" ", Location::new(1, 20), ";")
Insertion::new(" ", TextSize::from(20), ";")
);
let contents = r#"
@@ -351,7 +349,7 @@ x = 1
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new(" ", Location::new(1, 20), ";")
Insertion::new(" ", TextSize::from(20), ";")
);
Ok(())

Some files were not shown because too many files have changed in this diff.