Compare commits

..

96 Commits

Author SHA1 Message Date
Charlie Marsh
e9fc63331a Reverts 2023-05-21 15:22:52 -04:00
Charlie Marsh
df3b95a73d clean up 2023-05-21 14:52:04 -04:00
Charlie Marsh
a4432102f1 There's enough here for a proposal 2023-05-21 14:19:41 -04:00
Charlie Marsh
3295ccfbc4 Rename some stuff 2023-05-21 14:19:41 -04:00
Charlie Marsh
7b315b84e2 Make generic 2023-05-21 14:19:41 -04:00
Charlie Marsh
bb2adb3017 Introduce traits 2023-05-21 14:19:41 -04:00
Charlie Marsh
5536d2befc Benchmark UP 2023-05-21 14:19:41 -04:00
Charlie Marsh
b4824979b0 Separate struct 2023-05-21 14:19:41 -04:00
Charlie Marsh
8a2f58065e Dispatch rules off a vector 2023-05-21 14:19:41 -04:00
Arne de Laat
8ca3977602 Fix false-positive for TRY302 if exception cause is given (#4559) 2023-05-21 11:49:53 -04:00
Jacob Coffee
6db05d8cc6 Starlite -> Litestar (#4554) 2023-05-21 09:55:26 -04:00
Jonathan Plasse
fc63c6f2e2 Fix PLE01310 typo (#4550) 2023-05-20 19:34:03 +00:00
Jonathan Plasse
f7f5bc9085 Fix SIM401 snapshot (#4547) 2023-05-20 14:18:19 -04:00
Charlie Marsh
6b85430a14 Ignore #region code folding marks in eradicate rules (#4546) 2023-05-20 16:45:49 +00:00
Jonathan Plasse
a68c865010 Fix SIM110 and SIM111 ranges (#4545) 2023-05-20 12:40:35 -04:00
Charlie Marsh
fe7f2e2e4d Move submodule alias resolution into Context (#4543) 2023-05-20 16:34:10 +00:00
Felipe Peter
0a3cf8ba11 Fix typos in docs (#4540) 2023-05-20 07:23:17 -04:00
Charlie Marsh
bf5b463c0d Include empty success test in JUnit output (#4537) 2023-05-20 03:38:51 +00:00
Charlie Marsh
6aa9900c03 Improve handling of __qualname__, __module__, and __class__ (#4512) 2023-05-20 03:03:45 +00:00
Charlie Marsh
9e21414294 Improve reference resolution for deferred-annotations-within-classes (#4509) 2023-05-20 02:54:18 +00:00
Charlie Marsh
bb4e674415 Move reference-resolution into Context (#4510) 2023-05-20 02:47:15 +00:00
Charlie Marsh
b42ff08612 Parenthesize more sub-expressions in f-string conversion (#4535) 2023-05-19 19:41:30 +00:00
Jonathan Plasse
03fb62c174 Fix RUF010 auto-fix with parenthesis (#4524) 2023-05-19 19:05:51 +00:00
Jonathan Plasse
2dfc645ea9 Fix UP032 auto-fix with integers (#4525) 2023-05-19 18:53:50 +00:00
Hoël Bagard
fe8e2bb237 [pylint] Add named_expr_without_context (W0131) (#4531) 2023-05-19 18:00:01 +00:00
Tom Kuson
a9ed8d5391 Add Pylint docs (#4530) 2023-05-19 17:40:18 +00:00
Aaron Cunningham
41a681531d Support new extend-per-file-ignores setting (#4265) 2023-05-19 12:24:04 -04:00
Justin Prieto
837e70677b [flake8-pyi] Implement PYI013 (#4517) 2023-05-19 15:39:55 +00:00
Hoël Bagard
7ebe372122 [pylint] Add duplicate-value (W0130) (#4515) 2023-05-19 15:03:47 +00:00
konstin
625849b846 Ecosystem CI: Optionally diff fixes (#4193)
* Generate fixes when using --show-fixes

Example command: `cargo run --bin ruff -- --no-cache --select F401
--show-source --show-fixes
crates/ruff/resources/test/fixtures/pyflakes/F401_9.py`

Before, `--show-fixes` was ignored:

```
crates/ruff/resources/test/fixtures/pyflakes/F401_9.py:4:22: F401 [*] `foo.baz` imported but unused
  |
4 | __all__ = ("bar",)
5 | from foo import bar, baz
  |                      ^^^ F401
  |
  = help: Remove unused import: `foo.baz`

Found 1 error.
[*] 1 potentially fixable with the --fix option.
```

After:

```
crates/ruff/resources/test/fixtures/pyflakes/F401_9.py:4:22: F401 [*] `foo.baz` imported but unused
  |
4 | __all__ = ("bar",)
5 | from foo import bar, baz
  |                      ^^^ F401
  |
  = help: Remove unused import: `foo.baz`

ℹ Suggested fix
1 1 | """Test: late-binding of `__all__`."""
2 2 |
3 3 | __all__ = ("bar",)
4   |-from foo import bar, baz
  4 |+from foo import bar

Found 1 error.
[*] 1 potentially fixable with the --fix option.
```

Also fixes git clone
2023-05-19 09:49:57 +00:00
konstin
32f1edc555 Create dummy format CLI (#4453)
* Create dummy format CLI

* Hide format from clap, too

Missed that this is a separate option from `#[doc(hidden)]`

* Remove cargo feature and replace with warning

* No-alloc files parameter matching

* beta warning: warn -> warn_user_once

* Rephrase warning
2023-05-19 11:45:52 +02:00
Micha Reiser
2f35099f81 Remove regex dependency from ruff_python_ast (#4518) 2023-05-19 06:44:18 +00:00
Hoël Bagard
ce8fd31a8f Updated contributing documentation (#4516) 2023-05-19 08:39:15 +02:00
Ville Skyttä
fdb241cad2 [flake8-bandit] Implement paramiko-call (S601) (#4500) 2023-05-19 03:40:50 +00:00
Charlie Marsh
ab303f4e09 Gate schemars skip under feature flag (#4514) 2023-05-19 03:01:31 +00:00
Charlie Marsh
15cb21a6f4 Implement --extend-fixable option (#4297) 2023-05-18 22:20:19 -04:00
Ville Skyttä
2e2ba2cb16 Avoid some false positives in dunder variable assigments (#4508) 2023-05-19 02:11:20 +00:00
Charlie Marsh
d4c0a41b00 Bump version to 0.0.269 (#4506) 2023-05-18 19:45:20 +00:00
Charlie Marsh
8702b5a40a Bump version to 0.0.268 (#4501) 2023-05-18 15:35:46 -04:00
figsoda
bab818e801 Update RustPython dependencies (#4503) 2023-05-18 15:28:13 -04:00
konstin
a3aa841fc9 Overhaul sdist handling (#4439)
* Reduce sdist size

`maturin sdist && du -sh target/wheels/ruff-0.0.267.tar.gz`:
Before: 1,1M
After: 668K

* Test sdist before release

* Update maturin to fix the sdist
2023-05-18 19:02:22 +02:00
Ville Skyttä
fdd894145b S608 improvements (#4499) 2023-05-18 11:27:22 -04:00
Charlie Marsh
85f67b2ee3 Make the AST Checker pub(crate) (#4498) 2023-05-18 15:17:26 +00:00
Charlie Marsh
e9c6f16c56 Move unparse utility methods onto Generator (#4497) 2023-05-18 15:00:46 +00:00
Charlie Marsh
d3b18345c5 Move triple-quoted string detection into Indexer method (#4495) 2023-05-18 14:42:05 +00:00
Jonathan Plasse
0e4d174551 Fix COM812 false positive in string subscript (#4493) 2023-05-18 14:35:41 +00:00
Charlie Marsh
73efbeb581 Invert quote-style when generating code within f-strings (#4487) 2023-05-18 14:33:33 +00:00
Charlie Marsh
2fb312bb2b Fix scoping of comprehensions within classes (#4494) 2023-05-18 14:30:02 +00:00
Charlie Marsh
e8e66f3824 Remove unnecessary path prefixes (#4492) 2023-05-18 10:19:09 -04:00
Charlie Marsh
a8d080c825 Extend multi-line noqa directives to start-of-line (#4490) 2023-05-18 13:05:27 +00:00
Charlie Marsh
ddd541b198 Move Insertion into its own module (#4478) 2023-05-17 21:11:41 +00:00
Tom Kuson
3090aec97d Add PLW docs (#4469) 2023-05-17 18:30:45 +00:00
Charlie Marsh
14c6419bc1 Bring pycodestyle rules into full compatibility (on SciPy) (#4472) 2023-05-17 16:51:55 +00:00
Charlie Marsh
3bc29d6c0c Allow shebang comments at start-of-file (#4473) 2023-05-17 16:32:12 +00:00
Charlie Marsh
67c5086aba Include precise tokens for extraneous-whitespace diagnostics (#4471) 2023-05-17 16:25:17 +00:00
Charlie Marsh
cd82b83f89 Avoid triggering pd#at and friends on non-subscripts (#4474) 2023-05-17 16:20:58 +00:00
Charlie Marsh
39fb2cc732 Remove special-casing for whitespace-around-@ (#4458) 2023-05-17 15:32:08 +00:00
John Kelly
9c732c7946 Implement TRY302 - raise after except (#4461) 2023-05-17 01:36:10 +00:00
Charlie Marsh
2332ea5753 Remove type-complexity ignores from map_codes.rs (#4463) 2023-05-17 01:02:24 +00:00
Charlie Marsh
6b1062ccc3 Enable pycodestyle rules under new "nursery" category (#4407) 2023-05-16 21:21:58 +00:00
Charlie Marsh
39fa38cb35 Enable pycodestyle rules (#3689) 2023-05-16 20:39:43 +00:00
Micha Reiser
ddf7de7e86 Prototype Black's string joining/splitting (#4449) 2023-05-16 18:42:40 +01:00
Charlie Marsh
e5101e8eac Split logical lines tests into one test per assertion (#4457) 2023-05-16 17:40:39 +00:00
Charlie Marsh
d9c3f8e249 Avoid flagging missing whitespace for decorators (#4454) 2023-05-16 13:15:01 -04:00
Charlie Marsh
7e0d018b35 Avoid emitting empty logical lines (#4452) 2023-05-16 16:33:33 +00:00
Jeong, YunWon
4b05ca1198 Specialize ConversionFlag (#4450) 2023-05-16 18:00:13 +02:00
Charlie Marsh
f0465bf106 Emit non-logical newlines for "empty" lines (#4444) 2023-05-16 14:58:56 +00:00
Charlie Marsh
8134ec25f0 Fix expected-indentation errors with end-of-line comments (#4438) 2023-05-16 10:45:54 -04:00
Jeong, YunWon
6049aabe27 Update RustPython and enable full-lexer feature (#4442) 2023-05-16 07:19:57 +00:00
Jeong, YunWon
badade3ccc Impl Default for SourceLocation (#4328)
Co-authored-by: Micha Reiser <micha@reiser.io>
2023-05-16 07:03:43 +00:00
Micha Reiser
fa26860296 Refactor range from Attributed to Nodes (#4422) 2023-05-16 06:36:32 +00:00
James Lamb
140e0acf54 Add LightGBM to user list (#4446) 2023-05-16 04:04:37 +00:00
Sladyn
c711db11ce [flake8-pyi] Implement unannotated-assignment-in-stub (PYI052) (#4293) 2023-05-16 02:06:55 +00:00
Charlie Marsh
1fe6954150 Fix bidirectional-unicode formatting (#4445) 2023-05-15 22:36:25 +00:00
Charlie Marsh
2414469ac3 Enable automatic rewrites of typing.Deque and typing.DefaultDict (#4420) 2023-05-15 22:33:24 +00:00
Tom Kuson
838ba1ca3d Add PLE rule docs (#4437) 2023-05-15 19:48:18 +00:00
Charlie Marsh
8f3f8d3e0b Revert change to re-run release on tag update (#4441) 2023-05-15 15:48:45 +00:00
qdegraaf
8ba9eb83af Implement flake8-async plugin (#4432) 2023-05-15 09:15:28 -04:00
Zanie Adkins
2c6efc2f5f Update C419 to be a suggested fix (#4424) 2023-05-15 10:30:40 +02:00
Ben Doerry
d6930ca991 Merge subsettings when extending configurations (#4431) 2023-05-15 02:34:58 +00:00
Yanks Yoon
f70c286e6a docs: update contributing guide (#4428) 2023-05-15 02:21:37 +00:00
Charlie Marsh
dcff515ad8 Make extend_function_names an Option type (#4434) 2023-05-15 02:15:02 +00:00
Jonathan Plasse
b9e387013f Fix RUF010 autofix within f-strings (#4423) 2023-05-15 02:08:30 +00:00
Charlie Marsh
a69451ff46 [pyupgrade] Remove keep-runtime-typing setting (#4427) 2023-05-14 03:12:52 +00:00
Tyler Yep
01b372a75c Implement flake8-future-annotations FA100 (#3979) 2023-05-14 03:00:06 +00:00
Charlie Marsh
cd2e7fa72a Use TextSize for flake8-todos Directive methods (#4426) 2023-05-13 22:05:51 -04:00
Charlie Marsh
fdf0b999cd Replace TODO tag regex with a lexer (#4413) 2023-05-13 15:23:46 +00:00
Jonathan Plasse
45b5fa573f Ignore ANN401 for overridden methods (#4409) 2023-05-13 15:20:04 +00:00
Jonathan Plasse
a0258f2205 [pylint] Fix PLW3301 auto-fix with generators (#4412) 2023-05-13 11:17:13 -04:00
alm
0a68636de3 [pylint] Add duplicate-bases rule (#4411) 2023-05-13 14:28:03 +00:00
Evan Rittenhouse
2f53781a77 Implement flake8_todos (#3921) 2023-05-13 14:19:06 +00:00
Micha Reiser
7e7be05ddf Upgrade dependencies (#4389) 2023-05-13 13:00:25 +00:00
Micha Reiser
f5afa8198c Use new rustpython_format crate over rustpython-common (#4388) 2023-05-13 12:35:02 +00:00
Charlie Marsh
eeabfd6d18 Enable autofix for split-assertions at top level (#4405) 2023-05-12 17:35:49 -04:00
Charlie Marsh
490301f9fe Replace macro_rules! visitors with dedicated methods (#4402) 2023-05-12 17:05:59 -04:00
Zanie Adkins
f5be3d8e5b Update CI to test Python wheel on Linux (#4398) 2023-05-12 16:27:18 -04:00
586 changed files with 14770 additions and 7612 deletions

View File

@@ -15,6 +15,8 @@ env:
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
PACKAGE_NAME: ruff
PYTHON_VERSION: "3.7" # to build abi3 wheels
jobs:
cargo-fmt:
@@ -181,18 +183,30 @@ jobs:
- name: "Install cargo-udeps"
uses: taiki-e/install-action@cargo-udeps
- name: "Run cargo-udeps"
run: cargo +nightly-2023-03-30 udeps
python-package:
name: "python package"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
architecture: x64
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build wheels"
uses: PyO3/maturin-action@v1
with:
manylinux: auto
args: --out dist
- name: "Test wheel"
run: |
unused_dependencies=$(cargo +nightly-2023-03-30 udeps > unused.txt && cat unused.txt | cut -d $'\n' -f 2-)
if [ -z "$unused_dependencies" ]; then
echo "No unused dependencies found" > $GITHUB_STEP_SUMMARY
exit 0
else
echo "Found unused dependencies" > $GITHUB_STEP_SUMMARY
echo '```console' >> $GITHUB_STEP_SUMMARY
echo "$unused_dependencies" >> $GITHUB_STEP_SUMMARY
echo '```' >> $GITHUB_STEP_SUMMARY
exit 1
fi
pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
ruff --help
python -m ruff --help
pre-commit:
name: "pre-commit"

View File

@@ -1,9 +1,9 @@
name: "[ruff] Release"
on:
push:
tags:
- v*
workflow_dispatch:
release:
types: [published]
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -18,6 +18,31 @@ env:
RUSTUP_MAX_RETRIES: 10
jobs:
sdist:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi
- name: "Build sdist"
uses: PyO3/maturin-action@v1
with:
command: sdist
args: --out dist
- name: "Test sdist"
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*.tar.gz --force-reinstall
ruff --help
python -m ruff --help
- name: "Upload sdist"
uses: actions/upload-artifact@v3
with:
name: wheels
path: dist
macos-x86_64:
runs-on: macos-latest
steps:
@@ -32,7 +57,7 @@ jobs:
uses: PyO3/maturin-action@v1
with:
target: x86_64
args: --release --out dist --sdist
args: --release --out dist
- name: "Test wheel - x86_64"
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall

View File

@@ -1,5 +1,14 @@
# Breaking Changes
## 0.0.268
### The `keep-runtime-typing` setting has been removed ([#4427](https://github.com/charliermarsh/ruff/pull/4427))
Enabling the `keep-runtime-typing` option, located under the `pyupgrade` section, is equivalent
to ignoring the `UP006` and `UP007` rules via Ruff's standard `ignore` mechanism. As there's no
need for a dedicated setting to disable these rules, the `keep-runtime-typing` option has been
removed.
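
For anyone migrating, the equivalent configuration is simply to ignore the two rules directly, as the note above says. A minimal sketch, assuming a `pyproject.toml`-based setup (use the corresponding top-level keys in a standalone `ruff.toml`):

```toml
[tool.ruff]
# Equivalent to the removed `pyupgrade.keep-runtime-typing = true`:
# leave the PEP 585/604 rewrites (UP006/UP007) disabled.
ignore = ["UP006", "UP007"]
```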
## 0.0.267
### `update-check` is no longer a valid configuration option ([#4313](https://github.com/charliermarsh/ruff/pull/4313))

View File

@@ -106,7 +106,7 @@ At a high level, the steps involved in adding a new lint rule are as follows:
1. Create a file for your rule (e.g., `crates/ruff/src/rules/flake8_bugbear/rules/abstract_base_class.rs`).
1. In that file, define a violation struct. You can grep for `#[violation]` to see examples.
1. Map the violation struct to a rule code in `crates/ruff/src/registry.rs` (e.g., `E402`).
1. Define the logic for triggering the violation in `crates/ruff/src/checkers/ast.rs` (for AST-based
1. Define the logic for triggering the violation in `crates/ruff/src/checkers/ast/mod.rs` (for AST-based
checks), `crates/ruff/src/checkers/tokens.rs` (for token-based checks), `crates/ruff/src/checkers/lines.rs`
(for text-based checks), or `crates/ruff/src/checkers/filesystem.rs` (for filesystem-based
checks).
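
As a rough sketch of the violation-struct step above (illustrative only: the rule name is made up, and the exact macro and import paths should be checked against existing rules in the tree):

```rust
use ruff_diagnostics::Violation;
use ruff_macros::{derive_message_formats, violation};

/// Hypothetical rule, used only to show the shape of a violation struct.
#[violation]
pub struct ExampleViolation;

impl Violation for ExampleViolation {
    #[derive_message_formats]
    fn message(&self) -> String {
        format!("Example of the bad thing this rule checks for")
    }
}
```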
@@ -134,7 +134,7 @@ Run `cargo dev generate-all` to generate the code for your new fixture. Then run
locally with (e.g.) `cargo run -p ruff_cli -- check crates/ruff/resources/test/fixtures/pycodestyle/E402.py --no-cache --select E402`.
Once you're satisfied with the output, codify the behavior as a snapshot test by adding a new
`test_case` macro in the relevant `crates/ruff/src/[linter]/mod.rs` file. Then, run `cargo test`.
`test_case` macro in the relevant `crates/ruff/src/rules/[linter]/mod.rs` file. Then, run `cargo test`.
Your test will fail, but you'll be prompted to follow-up with `cargo insta review`. Accept the
generated snapshot, then commit the snapshot file alongside the rest of your changes.
@@ -148,7 +148,7 @@ This implies that rule names:
- should state the bad thing being checked for
- should not contain instructions on what you what you should use instead
- should not contain instructions on what you should use instead
(these belong in the rule documentation and the `autofix_title` for rules that have autofix)
When re-implementing rules from other linters, this convention is given more importance than

458
Cargo.lock generated

File diff suppressed because it is too large.

View File

@@ -11,7 +11,7 @@ authors = ["Charlie Marsh <charlie.r.marsh@gmail.com>"]
[workspace.dependencies]
anyhow = { version = "1.0.69" }
bitflags = { version = "2.2.1" }
bitflags = { version = "2.3.1" }
chrono = { version = "0.4.23", default-features = false, features = ["clock"] }
clap = { version = "4.1.8", features = ["derive"] }
colored = { version = "2.0.0" }
@@ -30,11 +30,11 @@ path-absolutize = { version = "3.0.14" }
proc-macro2 = { version = "1.0.51" }
quote = { version = "1.0.23" }
regex = { version = "1.7.1" }
ruff_text_size = { git = "https://github.com/RustPython/Parser.git", rev = "947fb53d0b41fec465db3d8e725bdb2eec1299ec" }
rustc-hash = { version = "1.1.0" }
rustpython-common = { git = "https://github.com/RustPython/RustPython.git", rev = "f3e4d3409253660bd4fa7f3d24d3db747e7dca61" }
rustpython-literal = { git = "https://github.com/RustPython/Parser.git", rev = "947fb53d0b41fec465db3d8e725bdb2eec1299ec" }
rustpython-parser = { git = "https://github.com/RustPython/Parser.git", rev = "947fb53d0b41fec465db3d8e725bdb2eec1299ec" , default-features = false}
ruff_text_size = { git = "https://github.com/RustPython/Parser.git", rev = "3654cf0bdfc270df6b2b83e2df086843574ad082" }
rustpython-format = { git = "https://github.com/RustPython/Parser.git", rev = "3654cf0bdfc270df6b2b83e2df086843574ad082" }
rustpython-literal = { git = "https://github.com/RustPython/Parser.git", rev = "3654cf0bdfc270df6b2b83e2df086843574ad082" }
rustpython-parser = { git = "https://github.com/RustPython/Parser.git", rev = "3654cf0bdfc270df6b2b83e2df086843574ad082", default-features = false, features = ["full-lexer", "all-nodes-with-ranges"] }
schemars = { version = "0.8.12" }
serde = { version = "1.0.152", features = ["derive"] }
serde_json = { version = "1.0.93", features = ["preserve_order"] }

56
LICENSE
View File

@@ -354,6 +354,37 @@ are:
SOFTWARE.
"""
- flake8-todos, licensed as follows:
"""
Copyright (c) 2019 EclecticIQ. All rights reserved.
Copyright (c) 2020 Gram <gram@orsinium.dev>. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
- flake8-unused-arguments, licensed as follows:
"""
MIT License
@@ -783,6 +814,31 @@ are:
SOFTWARE.
"""
- flake8-async, licensed as follows:
"""
MIT License
Copyright (c) 2022 Cooper Lees
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
- flake8-type-checking, licensed as follows:
"""
Copyright (c) 2021, Sondre Lillebø Gundersen

View File

@@ -137,7 +137,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com) hook:
```yaml
- repo: https://github.com/charliermarsh/ruff-pre-commit
# Ruff version.
rev: 'v0.0.267'
rev: 'v0.0.269'
hooks:
- id: ruff
```
@@ -249,6 +249,7 @@ quality tools, including:
- [eradicate](https://pypi.org/project/eradicate/)
- [flake8-2020](https://pypi.org/project/flake8-2020/)
- [flake8-annotations](https://pypi.org/project/flake8-annotations/)
- [flake8-async](https://pypi.org/project/flake8-async)
- [flake8-bandit](https://pypi.org/project/flake8-bandit/) ([#1646](https://github.com/charliermarsh/ruff/issues/1646))
- [flake8-blind-except](https://pypi.org/project/flake8-blind-except/)
- [flake8-boolean-trap](https://pypi.org/project/flake8-boolean-trap/)
@@ -263,6 +264,7 @@ quality tools, including:
- [flake8-eradicate](https://pypi.org/project/flake8-eradicate/)
- [flake8-errmsg](https://pypi.org/project/flake8-errmsg/)
- [flake8-executable](https://pypi.org/project/flake8-executable/)
- [flake8-future-annotations](https://pypi.org/project/flake8-future-annotations/)
- [flake8-gettext](https://pypi.org/project/flake8-gettext/)
- [flake8-implicit-str-concat](https://pypi.org/project/flake8-implicit-str-concat/)
- [flake8-import-conventions](https://github.com/joaopalmeiro/flake8-import-conventions)
@@ -279,6 +281,7 @@ quality tools, including:
- [flake8-simplify](https://pypi.org/project/flake8-simplify/)
- [flake8-super](https://pypi.org/project/flake8-super/)
- [flake8-tidy-imports](https://pypi.org/project/flake8-tidy-imports/)
- [flake8-todos](https://pypi.org/project/flake8-todos/)
- [flake8-type-checking](https://pypi.org/project/flake8-type-checking/)
- [flake8-use-pathlib](https://pypi.org/project/flake8-use-pathlib/)
- [flynt](https://pypi.org/project/flynt/) ([#2102](https://github.com/charliermarsh/ruff/issues/2102))
@@ -361,7 +364,7 @@ Ruff is used by a number of major open-source projects and companies, including:
- Modern Treasury ([Python SDK](https://github.com/Modern-Treasury/modern-treasury-python-sdk))
- Mozilla ([Firefox](https://github.com/mozilla/gecko-dev))
- [MegaLinter](https://github.com/oxsecurity/megalinter)
- Microsoft ([Semantic Kernel](https://github.com/microsoft/semantic-kernel), [ONNX Runtime](https://github.com/microsoft/onnxruntime))
- Microsoft ([Semantic Kernel](https://github.com/microsoft/semantic-kernel), [ONNX Runtime](https://github.com/microsoft/onnxruntime), [LightGBM](https://github.com/microsoft/LightGBM))
- Netflix ([Dispatch](https://github.com/Netflix/dispatch))
- [Neon](https://github.com/neondatabase/neon)
- [ONNX](https://github.com/onnx/onnx)
@@ -385,7 +388,7 @@ Ruff is used by a number of major open-source projects and companies, including:
- [SciPy](https://github.com/scipy/scipy)
- [Sphinx](https://github.com/sphinx-doc/sphinx)
- [Stable Baselines3](https://github.com/DLR-RM/stable-baselines3)
- [Starlite](https://github.com/starlite-api/starlite)
- [Litestar](https://litestar.dev/)
- [The Algorithms](https://github.com/TheAlgorithms/Python)
- [Vega-Altair](https://github.com/altair-viz/altair)
- WordPress ([Openverse](https://github.com/WordPress/openverse))

View File

@@ -1,6 +1,6 @@
[package]
name = "flake8-to-ruff"
version = "0.0.267"
version = "0.0.269"
edition = { workspace = true }
rust-version = { workspace = true }

View File

@@ -26,7 +26,7 @@ requires-python = ">=3.7"
repository = "https://github.com/charliermarsh/ruff#subdirectory=crates/flake8_to_ruff"
[build-system]
requires = ["maturin>=0.15.1,<0.16"]
requires = ["maturin>=0.15.2,<0.16"]
build-backend = "maturin"
[tool.maturin]

View File

@@ -1,6 +1,6 @@
[package]
name = "ruff"
version = "0.0.267"
version = "0.0.269"
authors.workspace = true
edition.workspace = true
rust-version.workspace = true
@@ -54,7 +54,7 @@ quick-junit = { version = "0.3.2" }
regex = { workspace = true }
result-like = { version = "0.4.6" }
rustc-hash = { workspace = true }
rustpython-common = { workspace = true }
rustpython-format = { workspace = true }
rustpython-parser = { workspace = true }
schemars = { workspace = true, optional = true }
semver = { version = "1.0.16" }
@@ -81,6 +81,5 @@ colored = { workspace = true, features = ["no-color"] }
[features]
default = []
schemars = ["dep:schemars"]
logical_lines = []
jupyter_notebook = []
ecosystem_ci = []

View File

@@ -1,4 +1,5 @@
from typing import Any, Type
from typing_extensions import override
# Error
def foo(a, b):
@@ -94,6 +95,31 @@ class Foo:
def foo(self: "Foo", a: int, *params: str, **options: Any) -> int:
pass
# ANN401
@override
def foo(self: "Foo", a: Any, *params: str, **options: str) -> int:
pass
# ANN401
@override
def foo(self: "Foo", a: int, *params: str, **options: str) -> Any:
pass
# ANN401
@override
def foo(self: "Foo", a: int, *params: Any, **options: Any) -> int:
pass
# ANN401
@override
def foo(self: "Foo", a: int, *params: Any, **options: str) -> int:
pass
# ANN401
@override
def foo(self: "Foo", a: int, *params: str, **options: Any) -> int:
pass
# OK
@classmethod
def foo(cls: Type["Foo"], a: int, b: int) -> int:

View File

@@ -0,0 +1,23 @@
import urllib.request
import requests
import httpx
async def foo():
urllib.request.urlopen("http://example.com/foo/bar").read()
async def foo():
requests.get()
async def foo():
httpx.get()
async def foo():
requests.post()
async def foo():
httpx.post()

View File

@@ -0,0 +1,31 @@
import os
import subprocess
import time
async def foo():
open("foo")
async def foo():
time.sleep(1)
async def foo():
subprocess.run("foo")
async def foo():
subprocess.call("foo")
async def foo():
subprocess.foo(0)
async def foo():
os.wait4(10)
async def foo():
os.wait(12)

View File

@@ -0,0 +1,13 @@
import os
async def foo():
os.popen()
async def foo():
os.spawnl()
async def foo():
os.fspath("foo")

View File

@@ -0,0 +1,3 @@
import paramiko
paramiko.exec_command('something; really; unsafe')

View File

@@ -74,8 +74,8 @@ def query40():
def query41():
return (
"SELECT *"
"FROM table"
"SELECT * "
"FROM table "
f"WHERE var = {var}"
)
@@ -84,7 +84,7 @@ query42 = cursor.execute("SELECT * FROM table WHERE var = %s" % var)
query43 = cursor.execute(f"SELECT * FROM table WHERE var = {var}")
query44 = cursor.execute("SELECT * FROM table WHERE var = {}".format(var))
query45 = cursor.executemany("SELECT * FROM table WHERE var = %s" % var, [])
# # pass
query = "SELECT * FROM table WHERE id = 1"
query = "DELETE FROM table WHERE id = 1"
@@ -93,3 +93,12 @@ query = "UPDATE table SET id = 1"
cursor.execute('SELECT * FROM table WHERE id = %s', var)
cursor.execute('SELECT * FROM table WHERE id = 1')
cursor.executemany('SELECT * FROM table WHERE id = %s', [var, var2])
# # INSERT without INTO (e.g. MySQL and derivatives)
query = "INSERT table VALUES (%s)" % (var,)
# # REPLACE (e.g. MySQL and derivatives, SQLite)
query = "REPLACE INTO table VALUES (%s)" % (var,)
query = "REPLACE table VALUES (%s)" % (var,)
query = "Deselect something that is not SQL even though it has a ' from ' somewhere in %s." % "there"

View File

@@ -631,3 +631,11 @@ result = function(
the_first_one = next(
(i for i in range(10) if i // 2 == 0) # COM812 fix should include the final bracket
)
foo = namedtuple(
name="foo",
status="bar",
message="sfdsdfsdgs fsdfsdf output!dsfdfsdjkg ghfskdjghkdssd sd fsdf s\n"[
:20
],
)

View File

@@ -0,0 +1,7 @@
from typing import List
import typing as t
def main(_: List[int]) -> None:
a_list: t.List[str] = []
a_list.append("hello")

View File

@@ -0,0 +1,6 @@
from typing import List
def main() -> None:
a_list: List[str] = []
a_list.append("hello")

View File

@@ -0,0 +1,8 @@
from typing import Dict, List, Optional, Set, Union, cast
def main() -> None:
a_list: List[Optional[str]] = []
a_list.append("hello")
a_dict = cast(Dict[int | None, Union[int, Set[bool]]], {})
a_dict[1] = {True, False}

View File

@@ -0,0 +1,6 @@
import typing
def main() -> None:
a_list: typing.List[str] = []
a_list.append("hello")

View File

@@ -0,0 +1,6 @@
import typing as t
def main() -> None:
a_list: t.List[str] = []
a_list.append("hello")

View File

@@ -0,0 +1,7 @@
def main() -> None:
a_list: list[str] = []
a_list.append("hello")
def hello(y: dict[str, int]) -> None:
del y

View File

@@ -0,0 +1,7 @@
def main() -> None:
a_list: list[str] | None = []
a_list.append("hello")
def hello(y: dict[str, int] | None) -> None:
del y

View File

@@ -0,0 +1,8 @@
def main() -> None:
a_list: list[str | None] = []
a_list.append("hello")
def hello(y: dict[str | None, int]) -> None:
z: tuple[str, str | None, str] = tuple(y)
del z

View File

@@ -0,0 +1,3 @@
def main() -> str:
a_str = "hello"
return a_str

View File

@@ -0,0 +1,10 @@
from typing import NamedTuple
class Stuff(NamedTuple):
x: int
def main() -> None:
a_list = Stuff(5)
print(a_list)

View File

@@ -0,0 +1,6 @@
from __future__ import annotations
def main() -> None:
a_list: list[str] = []
a_list.append("hello")

View File

@@ -0,0 +1,8 @@
import typing
IRRELEVANT = typing.TypeVar
def main() -> None:
List: list[str] = []
List.append("hello")

View File

@@ -0,0 +1,65 @@
class OneAttributeClass:
value: int
...
class OneAttributeClass2:
...
value: int
class TwoEllipsesClass:
...
...
class DocstringClass:
"""
My body only contains an ellipsis.
"""
...
class NonEmptyChild(Exception):
value: int
...
class NonEmptyChild2(Exception):
...
value: int
class NonEmptyWithInit:
value: int
...
def __init__():
pass
class EmptyClass:
...
class EmptyEllipsis:
...
class Dog:
eyes: int = 2
class WithInit:
value: int = 0
def __init__():
...
def function():
...
...

View File

@@ -0,0 +1,56 @@
# Violations of PYI013
class OneAttributeClass:
value: int
... # Error
class OneAttributeClass2:
... # Error
value: int
class MyClass:
...
value: int
class TwoEllipsesClass:
...
... # Error
class DocstringClass:
"""
My body only contains an ellipsis.
"""
... # Error
class NonEmptyChild(Exception):
value: int
... # Error
class NonEmptyChild2(Exception):
... # Error
value: int
class NonEmptyWithInit:
value: int
... # Error
def __init__():
pass
# Not violations
class EmptyClass: ...
class EmptyEllipsis: ...
class Dog:
eyes: int = 2
class WithInit:
value: int = 0
def __init__(): ...
def function(): ...
...

View File

@@ -0,0 +1,93 @@
import builtins
import typing
from typing import TypeAlias, Final
field1: int
field2: int = ...
field3 = ... # type: int # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int")
field4: int = 0
field41: int = 0xFFFFFFFF
field42: int = 1234567890
field43: int = -0xFFFFFFFF
field44: int = -1234567890
field5 = 0 # type: int # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int") # Y052 Need type annotation for "field5"
field6 = 0 # Y052 Need type annotation for "field6"
field7 = b"" # Y052 Need type annotation for "field7"
field71 = "foo" # Y052 Need type annotation for "field71"
field72: str = "foo"
field8 = False # Y052 Need type annotation for "field8"
field81 = -1 # Y052 Need type annotation for "field81"
field82: float = -98.43
field83 = -42j # Y052 Need type annotation for "field83"
field84 = 5 + 42j # Y052 Need type annotation for "field84"
field85 = -5 - 42j # Y052 Need type annotation for "field85"
field9 = None # Y026 Use typing_extensions.TypeAlias for type aliases, e.g. "field9: TypeAlias = None"
Field95: TypeAlias = None
Field96: TypeAlias = int | None
Field97: TypeAlias = None | typing.SupportsInt | builtins.str | float | bool
field19 = [1, 2, 3] # Y052 Need type annotation for "field19"
field191: list[int] = [1, 2, 3]
field20 = (1, 2, 3) # Y052 Need type annotation for "field20"
field201: tuple[int, ...] = (1, 2, 3)
field21 = {1, 2, 3} # Y052 Need type annotation for "field21"
field211: set[int] = {1, 2, 3}
field212 = {"foo": "bar"} # Y052 Need type annotation for "field212"
field213: dict[str, str] = {"foo": "bar"}
field22: Final = {"foo": 5}
field221: list[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] # Y015 Only simple default values are allowed for assignments
field223: list[int] = [*range(10)] # Y015 Only simple default values are allowed for assignments
field224: list[int] = list(range(10)) # Y015 Only simple default values are allowed for assignments
field225: list[object] = [{}, 1, 2] # Y015 Only simple default values are allowed for assignments
field226: tuple[str | tuple[str, ...], ...] = ("foo", ("foo", "bar")) # Y015 Only simple default values are allowed for assignments
field227: dict[str, object] = {"foo": {"foo": "bar"}} # Y015 Only simple default values are allowed for assignments
field228: dict[str, list[object]] = {"foo": []} # Y015 Only simple default values are allowed for assignments
# When parsed, this case results in `None` being placed in the `.keys` list for the `ast.Dict` node
field229: dict[int, int] = {1: 2, **{3: 4}} # Y015 Only simple default values are allowed for assignments
field23 = "foo" + "bar" # Y015 Only simple default values are allowed for assignments
field24 = b"foo" + b"bar" # Y015 Only simple default values are allowed for assignments
field25 = 5 * 5 # Y015 Only simple default values are allowed for assignments
# We shouldn't emit Y015 within functions
def f():
field26: list[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
# We shouldn't emit Y015 for __slots__ or __match_args__
class Class1:
__slots__ = (
'_one',
'_two',
'_three',
'_four',
'_five',
'_six',
'_seven',
'_eight',
'_nine',
'_ten',
'_eleven',
)
__match_args__ = (
'one',
'two',
'three',
'four',
'five',
'six',
'seven',
'eight',
'nine',
'ten',
'eleven',
)
# We shouldn't emit Y015 for __all__
__all__ = ["Class1"]
# Ignore the following for PYI015
field26 = typing.Sequence[int]
field27 = list[str]
field28 = builtins.str
field29 = str
field30 = str | bytes | None

View File

@@ -0,0 +1,100 @@
import builtins
import typing
from typing import TypeAlias, Final, NewType, TypeVar, TypeVarTuple, ParamSpec
# We shouldn't emit Y015 for simple default values
field1: int
field2: int = ...
field3 = ... # type: int # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int")
field4: int = 0
field41: int = 0xFFFFFFFF
field42: int = 1234567890
field43: int = -0xFFFFFFFF
field44: int = -1234567890
field5 = 0 # type: int # Y033 Do not use type comments in stubs (e.g. use "x: int" instead of "x = ... # type: int") # Y052 Need type annotation for "field5"
field6 = 0 # Y052 Need type annotation for "field6"
field7 = b"" # Y052 Need type annotation for "field7"
field71 = "foo" # Y052 Need type annotation for "field71"
field72: str = "foo"
field8 = False # Y052 Need type annotation for "field8"
field81 = -1 # Y052 Need type annotation for "field81"
field82: float = -98.43
field83 = -42j # Y052 Need type annotation for "field83"
field84 = 5 + 42j # Y052 Need type annotation for "field84"
field85 = -5 - 42j # Y052 Need type annotation for "field85"
field9 = None # Y026 Use typing_extensions.TypeAlias for type aliases, e.g. "field9: TypeAlias = None"
Field95: TypeAlias = None
Field96: TypeAlias = int | None
Field97: TypeAlias = None | typing.SupportsInt | builtins.str | float | bool
Field98 = NewType('MyInt', int)
Field99 = TypeVar('Field99')
Field100 = TypeVarTuple('Field100')
Field101 = ParamSpec('Field101')
field19 = [1, 2, 3] # Y052 Need type annotation for "field19"
field191: list[int] = [1, 2, 3]
field20 = (1, 2, 3) # Y052 Need type annotation for "field20"
field201: tuple[int, ...] = (1, 2, 3)
field21 = {1, 2, 3} # Y052 Need type annotation for "field21"
field211: set[int] = {1, 2, 3}
field212 = {"foo": "bar"} # Y052 Need type annotation for "field212"
field213: dict[str, str] = {"foo": "bar"}
field22: Final = {"foo": 5}
# We *should* emit Y015 for more complex default values
field221: list[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] # Y015 Only simple default values are allowed for assignments
field223: list[int] = [*range(10)] # Y015 Only simple default values are allowed for assignments
field224: list[int] = list(range(10)) # Y015 Only simple default values are allowed for assignments
field225: list[object] = [{}, 1, 2] # Y015 Only simple default values are allowed for assignments
field226: tuple[str | tuple[str, ...], ...] = ("foo", ("foo", "bar")) # Y015 Only simple default values are allowed for assignments
field227: dict[str, object] = {"foo": {"foo": "bar"}} # Y015 Only simple default values are allowed for assignments
field228: dict[str, list[object]] = {"foo": []} # Y015 Only simple default values are allowed for assignments
# When parsed, this case results in `None` being placed in the `.keys` list for the `ast.Dict` node
field229: dict[int, int] = {1: 2, **{3: 4}} # Y015 Only simple default values are allowed for assignments
field23 = "foo" + "bar" # Y015 Only simple default values are allowed for assignments
field24 = b"foo" + b"bar" # Y015 Only simple default values are allowed for assignments
field25 = 5 * 5 # Y015 Only simple default values are allowed for assignments
# We shouldn't emit Y015 within functions
def f():
field26: list[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
# We shouldn't emit Y015 for __slots__ or __match_args__
class Class1:
__slots__ = (
'_one',
'_two',
'_three',
'_four',
'_five',
'_six',
'_seven',
'_eight',
'_nine',
'_ten',
'_eleven',
)
__match_args__ = (
'one',
'two',
'three',
'four',
'five',
'six',
'seven',
'eight',
'nine',
'ten',
'eleven',
)
# We shouldn't emit Y015 for __all__
__all__ = ["Class1"]
# Ignore the following for PYI015
field26 = typing.Sequence[int]
field27 = list[str]
field28 = builtins.str
field29 = str
field30 = str | bytes | None

View File

@@ -39,3 +39,8 @@ def test_error():
message
"""
)
assert something # OK
assert something and something_else # Error
assert something and something_else and something_third # Error

View File

@@ -14,7 +14,7 @@ if key not in a_dict:
else:
var = a_dict[key]
# SIM401 (default with a complex expression)
# OK (default contains effect)
if key in a_dict:
var = a_dict[key]
else:

View File

@@ -0,0 +1,8 @@
# T001 - accepted
# TODO (evanrittenhouse): this is a valid TODO
# SOME_OTHER_TAG: this is impossible to determine
# this is not a TODO
# T001 - errors
# XXX (evanrittenhouse): this is not fine
# FIXME (evanrittenhouse): this is not fine

View File

@@ -0,0 +1,7 @@
# T002 - accepted
# TODO (evanrittenhouse): this has an author
# TODO(evanrittenhouse): this also has an author
# T002 - errors
# TODO: this has no author
# FIXME: neither does this
# TODO : and neither does this

View File

@@ -0,0 +1,29 @@
# TDO003 - accepted
# TODO: this comment has a link
# https://github.com/charliermarsh/ruff/issues/3870
# TODO: this comment has an issue
# TDO-3870
# TDO003 - errors
# TODO: this comment has no
# link after it
# TODO: here's a TODO with no link after it
def foo(x):
return x
# TODO: here's a TODO on the last line with no link
# Here's more content.
# TDO-3870
# TODO: here's a TODO on the last line with no link
# Here's more content, with a space.
# TDO-3870
# TODO: here's a TODO without an issue link
# TODO: followed by a new TODO with an issue link
# TDO-3870
# TODO: here's a TODO on the last line with no link

View File

@@ -0,0 +1,6 @@
# T004 - accepted
# TODO(evanrittenhouse): this has a colon
# T004 - errors
# TODO this has no colon
# TODO(evanrittenhouse 😀) this has no colon
# FIXME add a colon

View File

@@ -0,0 +1,6 @@
# T005 - accepted
# TODO(evanrittenhouse): this has text, while the errors do not
# T005 - errors
# TODO(evanrittenhouse):
# TODO(evanrittenhouse)
# FIXME

View File

@@ -0,0 +1,5 @@
# TDO006 - accepted
# TODO (evanrittenhouse): this is a valid TODO
# TDO006 - error
# ToDo (evanrittenhouse): invalid capitalization
# todo (evanrittenhouse): another invalid capitalization

View File

@@ -0,0 +1,8 @@
# T007 - accepted
# TODO(evanrittenhouse): this has a space after a colon
# TODO: so does this
# T007 - errors
# TODO(evanrittenhouse):this has no space after a colon
# TODO (evanrittenhouse):this doesn't either
# TODO:neither does this
# FIXME:and lastly neither does this

View File

@@ -40,3 +40,27 @@ def start():
#: E117 W191
def start():
print()
#: E112
if False: #
print()
#:
if False:
print()
#:
if False: #
print()
#:
if False:
print()
print()
#:
if False:
print()
if False:
print()
#:
if False:
print()

View File

@@ -76,3 +76,11 @@ if x == 4:
a[b1, :] == a[b1, ...]
b = a[:, b1]
#:
#: E201:1:6
spam[ ~ham]
#: Okay
x = [ #
'some value',
]

View File

@@ -160,6 +160,7 @@ if alpha[:-i]:
*a, b = (1, 2, 3)
@decorator
def squares(n):
return (i**2 for i in range(n))
@@ -168,4 +169,14 @@ ENG_PREFIXES = {
-6: "\u03bc", # Greek letter mu
-3: "m",
}
i = (
i + #
1
)
x[~y]
if i == -1:
pass
#:

View File

@@ -18,3 +18,13 @@ def foo() -> None:
#: E231
if (1,2):
pass
#: Okay
a = (1,\
2)
#: E231:2:20
mdtypes_template = {
'tag_full': [('mdtype', 'u4'), ('byte_count', 'u4')],
'tag_smalldata':[('byte_count_mdtype', 'u4'), ('data', 'S4')],
}

View File

@@ -64,3 +64,11 @@ a = 42 #  (One space one NBSP)
#: E262:2:9
# (Two spaces) Ok for block comment
a = 42 # (Two spaces)
#: E265:5:1
### Means test is not done yet
# E Means test is giving error (E)
# F Means test is failing (F)
# EF Means test is giving error and Failing
#! Means test is segfaulting
# 8 Means test runs forever

View File

@@ -56,3 +56,7 @@ if True:
def f():
print((yield))
x = (yield)
#: Okay
if (a and
b):
pass

View File

@@ -19,7 +19,7 @@ if x > 0:
else:
import e
y = x + 1
__some__magic = 1
import f

View File

@@ -0,0 +1,4 @@
#!/usr/bin/python
#
#!
#:

View File

@@ -0,0 +1,10 @@
"""Test: module bindings are preferred over local bindings, for deferred annotations."""
from __future__ import annotations
import datetime
from typing import Optional
class Class:
datetime: Optional[datetime.datetime]

View File

@@ -0,0 +1,12 @@
"""Test: module bindings are preferred over local bindings, for deferred annotations."""
from __future__ import annotations
from typing import TypeAlias, List
class Class:
List: TypeAlias = List
def bar(self) -> List:
pass

View File

@@ -0,0 +1,8 @@
"""Test: module bindings are preferred over local bindings, for deferred annotations."""
import datetime
from typing import Optional
class Class:
datetime: "Optional[datetime.datetime]"

View File

@@ -0,0 +1,24 @@
###
# Errors.
###
class A:
...
class B(A, A):
...
###
# Non-errors.
###
class C:
...
class D(C):
...
class E(A, C):
...

View File

@@ -0,0 +1,11 @@
###
# Errors.
###
incorrect_set = {"value1", 23, 5, "value1"}
incorrect_set = {1, 1}
###
# Non-errors.
###
correct_set = {"value1", 23, 5}
correct_set = {5, "5"}

View File

@@ -0,0 +1,19 @@
# Errors
(a := 42)
if True:
(b := 1)
class Foo:
(c := 1)
# OK
if a := 42:
print("Success")
a = 0
while (a := a + 1) < 10:
print("Correct")
a = (b := 1)

View File

@@ -19,3 +19,9 @@ min(
1, # This is a comment.
min(2, 3),
)
# Handle iterable expressions.
min(1, min(a))
min(1, min(i for i in range(10)))
max(1, max(a))
max(1, max(i for i in range(10)))

View File

@@ -56,3 +56,11 @@ def f(x: "List['Li' 'st[str]']") -> None:
def f(x: "Li" "st['List[str]']") -> None:
...
def f(x: typing.Deque[str]) -> None:
...
def f(x: typing.DefaultDict[str, str]) -> None:
...

View File

@@ -25,3 +25,4 @@ bytes()
bytes(b"foo")
bytes(b"""
foo""")
f"{str()}"

View File

@@ -0,0 +1,28 @@
# Errors
"{.real}".format(1)
"{0.real}".format(1)
"{a.real}".format(a=1)
"{.real}".format(1.0)
"{0.real}".format(1.0)
"{a.real}".format(a=1.0)
"{.real}".format(1j)
"{0.real}".format(1j)
"{a.real}".format(a=1j)
"{.real}".format(0b01)
"{0.real}".format(0b01)
"{a.real}".format(a=0b01)
"{}".format(1 + 2)
"{}".format([1, 2])
"{}".format({1, 2})
"{}".format({1: 2, 3: 4})
"{}".format((i for i in range(2)))
"{.real}".format(1 + 2)
"{.real}".format([1, 2])
"{.real}".format({1, 2})
"{.real}".format({1: 2, 3: 4})
"{}".format((i for i in range(2)))

View File

@@ -43,3 +43,6 @@ second = first + [
[] + foo + [ # This will be preserved, but doesn't prevent the fix
]
# Uses the non-preferred quote style, which should be retained.
f"{[*a(), 'b']}"

View File

@@ -1,4 +1,5 @@
bla = b"bla"
d = {"a": b"bla", "b": b"bla", "c": b"bla"}
def foo(one_arg):
@@ -7,6 +8,10 @@ def foo(one_arg):
f"{str(bla)}, {repr(bla)}, {ascii(bla)}" # RUF010
f"{str(d['a'])}, {repr(d['b'])}, {ascii(d['c'])}" # RUF010
f"{(str(bla))}, {(repr(bla))}, {(ascii(bla))}" # RUF010
f"{foo(bla)}" # OK
f"{str(bla, 'ascii')}, {str(bla, encoding='cp1255')}" # OK

View File

@@ -88,3 +88,12 @@ import sys # noqa: F401, RUF100
print(sys.path)
"shape: (6,)\nSeries: '' [duration[μs]]\n[\n\t0µs\n\t1µs\n\t2µs\n\t3µs\n\t4µs\n\t5µs\n]" # noqa: F401
def f():
# Ensure that the `noqa` applies to both the overlong line _and_ the unused
# variable.
a = """Lorem ipsum dolor sit amet.
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.
""" # noqa

View File

@@ -1,3 +1,8 @@
"""
Violation:
Checks for `raise` statements within `try` blocks.
"""
class MyException(Exception):
pass

View File

@@ -0,0 +1,123 @@
"""
Violation:
Checks for uses of `raise` directly after a `rescue`.
"""
class MyException(Exception):
pass
def bad():
try:
process()
except Exception:
raise
def bad():
try:
process()
except Exception:
raise
print("this code is pointless!")
def bad():
try:
process()
except:
# I am a comment, not a statement!
raise
def bad():
try:
process()
except Exception:
raise
def bad():
try:
process()
except Exception as e:
raise
def bad():
try:
process()
except Exception as e:
raise e
def bad():
try:
process()
except MyException:
raise
except Exception:
raise
def bad():
try:
process()
except MyException as e:
raise e
except Exception as e:
raise e
def bad():
try:
process()
except MyException as ex:
raise ex
except Exception as e:
raise e
def fine():
try:
process()
except Exception as e:
raise e from None
def fine():
try:
process()
except Exception as e:
raise e from Exception
def fine():
try:
process()
except Exception as e:
raise ex
def fine():
try:
process()
except MyException:
raise
except Exception:
print("bar")
def fine():
try:
process()
except Exception:
print("initiating rapid unscheduled disassembly of program")
def fine():
try:
process()
except MyException:
print("oh no!")
raise
def fine():
try:
process()
except Exception:
if True:
raise
def fine():
try:
process()
finally:
# I am a comment, not a statement!
print("but i am a statement")
raise

View File

@@ -5,7 +5,7 @@ use libcst_native::{
Codegen, CodegenState, ImportNames, ParenthesizableWhitespace, SmallStatement, Statement,
};
use ruff_text_size::{TextLen, TextRange, TextSize};
use rustpython_parser::ast::{self, ExcepthandlerKind, Expr, Keyword, Stmt, StmtKind};
use rustpython_parser::ast::{self, Excepthandler, Expr, Keyword, Ranged, Stmt};
use rustpython_parser::{lexer, Mode, Tok};
use ruff_diagnostics::Edit;
@@ -27,22 +27,22 @@ fn has_single_child(body: &[Stmt], deleted: &[&Stmt]) -> bool {
/// Determine if a child is the only statement in its body.
fn is_lone_child(child: &Stmt, parent: &Stmt, deleted: &[&Stmt]) -> Result<bool> {
match &parent.node {
StmtKind::FunctionDef(ast::StmtFunctionDef { body, .. })
| StmtKind::AsyncFunctionDef(ast::StmtAsyncFunctionDef { body, .. })
| StmtKind::ClassDef(ast::StmtClassDef { body, .. })
| StmtKind::With(ast::StmtWith { body, .. })
| StmtKind::AsyncWith(ast::StmtAsyncWith { body, .. }) => {
match parent {
Stmt::FunctionDef(ast::StmtFunctionDef { body, .. })
| Stmt::AsyncFunctionDef(ast::StmtAsyncFunctionDef { body, .. })
| Stmt::ClassDef(ast::StmtClassDef { body, .. })
| Stmt::With(ast::StmtWith { body, .. })
| Stmt::AsyncWith(ast::StmtAsyncWith { body, .. }) => {
if body.iter().contains(child) {
Ok(has_single_child(body, deleted))
} else {
bail!("Unable to find child in parent body")
}
}
StmtKind::For(ast::StmtFor { body, orelse, .. })
| StmtKind::AsyncFor(ast::StmtAsyncFor { body, orelse, .. })
| StmtKind::While(ast::StmtWhile { body, orelse, .. })
| StmtKind::If(ast::StmtIf { body, orelse, .. }) => {
Stmt::For(ast::StmtFor { body, orelse, .. })
| Stmt::AsyncFor(ast::StmtAsyncFor { body, orelse, .. })
| Stmt::While(ast::StmtWhile { body, orelse, .. })
| Stmt::If(ast::StmtIf { body, orelse, .. }) => {
if body.iter().contains(child) {
Ok(has_single_child(body, deleted))
} else if orelse.iter().contains(child) {
@@ -51,17 +51,19 @@ fn is_lone_child(child: &Stmt, parent: &Stmt, deleted: &[&Stmt]) -> Result<bool>
bail!("Unable to find child in parent body")
}
}
StmtKind::Try(ast::StmtTry {
Stmt::Try(ast::StmtTry {
body,
handlers,
orelse,
finalbody,
range: _,
})
| StmtKind::TryStar(ast::StmtTryStar {
| Stmt::TryStar(ast::StmtTryStar {
body,
handlers,
orelse,
finalbody,
range: _,
}) => {
if body.iter().contains(child) {
Ok(has_single_child(body, deleted))
@@ -69,10 +71,8 @@ fn is_lone_child(child: &Stmt, parent: &Stmt, deleted: &[&Stmt]) -> Result<bool>
Ok(has_single_child(orelse, deleted))
} else if finalbody.iter().contains(child) {
Ok(has_single_child(finalbody, deleted))
} else if let Some(body) = handlers.iter().find_map(|handler| match &handler.node {
ExcepthandlerKind::ExceptHandler(ast::ExcepthandlerExceptHandler {
body, ..
}) => {
} else if let Some(body) = handlers.iter().find_map(|handler| match handler {
Excepthandler::ExceptHandler(ast::ExcepthandlerExceptHandler { body, .. }) => {
if body.iter().contains(child) {
Some(body)
} else {
@@ -85,7 +85,7 @@ fn is_lone_child(child: &Stmt, parent: &Stmt, deleted: &[&Stmt]) -> Result<bool>
bail!("Unable to find child in parent body")
}
}
StmtKind::Match(ast::StmtMatch { cases, .. }) => {
Stmt::Match(ast::StmtMatch { cases, .. }) => {
if let Some(body) = cases.iter().find_map(|case| {
if case.body.iter().contains(child) {
Some(&case.body)
@@ -268,10 +268,7 @@ pub(crate) fn remove_unused_imports<'a>(
let module = module.map(compose_module_path);
let member = compose_module_path(&alias.name);
let mut full_name = String::with_capacity(
relative.len()
+ module.as_ref().map_or(0, std::string::String::len)
+ member.len()
+ 1,
relative.len() + module.as_ref().map_or(0, String::len) + member.len() + 1,
);
for _ in 0..relative.len() {
full_name.push('.');

File diff suppressed because it is too large.

View File

@@ -0,0 +1,42 @@
use ruff_diagnostics::Diagnostic;
use crate::checkers::ast::RuleContext;
use crate::registry::Rule;
use crate::settings::Settings;
/// Trait for a lint rule that can be run on an AST node of type `T`.
pub(crate) trait Analyzer<T>: Sized {
/// The [`Rule`] that this analyzer implements.
fn rule() -> Rule;
/// Run the analyzer on the given node.
fn run(diagnostics: &mut Vec<Diagnostic>, checker: &RuleContext, node: &T);
}
/// Internal representation of a single [`Rule`] that can be run on an AST node of type `T`.
pub(super) struct RegisteredRule<T> {
rule: Rule,
run: Executor<T>,
}
impl<T> RegisteredRule<T> {
pub(super) fn new<R: Analyzer<T> + 'static>() -> Self {
Self {
rule: R::rule(),
run: R::run,
}
}
#[inline]
pub(super) fn enabled(&self, settings: &Settings) -> bool {
settings.rules.enabled(self.rule)
}
#[inline]
pub(super) fn run(&self, diagnostics: &mut Vec<Diagnostic>, context: &RuleContext, node: &T) {
(self.run)(diagnostics, context, node);
}
}
/// Executor for an [`Analyzer`] as a generic function pointer.
type Executor<T> = fn(diagnostics: &mut Vec<Diagnostic>, checker: &RuleContext, node: &T);
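
To illustrate the intended dispatch pattern, a hypothetical rule implementing the trait above might look like the sketch below. The rule name, the node type, and the `Rule` variant are stand-ins, not part of this diff; it assumes the usual `use rustpython_parser::ast;` import.

```rust
// Hypothetical example only; `ExampleRule` is a stand-in.
struct ExampleRule;

impl Analyzer<ast::StmtClassDef> for ExampleRule {
    fn rule() -> Rule {
        // Assumed variant name, chosen for illustration.
        Rule::DuplicateBases
    }

    fn run(diagnostics: &mut Vec<Diagnostic>, checker: &RuleContext, node: &ast::StmtClassDef) {
        // Real rule logic would inspect `node` (using `checker` for semantic
        // context) and push a `Diagnostic` when the pattern is found; elided here.
        let _ = (diagnostics, checker, node);
    }
}
```

A rule written this way would be wrapped as `RegisteredRule::<ast::StmtClassDef>::new::<ExampleRule>()` and dispatched from a vector of registered rules, matching the "Dispatch rules off a vector" commit at the top of this comparison.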

View File

@@ -2,7 +2,7 @@
use std::borrow::Cow;
use std::path::Path;
use rustpython_parser::ast::{self, StmtKind, Suite};
use rustpython_parser::ast::{self, Ranged, Stmt, Suite};
use ruff_diagnostics::Diagnostic;
use ruff_python_ast::helpers::to_module_path;
@@ -28,18 +28,19 @@ fn extract_import_map(path: &Path, package: Option<&Path>, blocks: &[&Block]) ->
let num_imports = blocks.iter().map(|block| block.imports.len()).sum();
let mut module_imports = Vec::with_capacity(num_imports);
for stmt in blocks.iter().flat_map(|block| &block.imports) {
match &stmt.node {
StmtKind::Import(ast::StmtImport { names }) => {
match stmt {
Stmt::Import(ast::StmtImport { names, range: _ }) => {
module_imports.extend(
names
.iter()
.map(|name| ModuleImport::new(name.node.name.to_string(), stmt.range())),
.map(|name| ModuleImport::new(name.name.to_string(), stmt.range())),
);
}
StmtKind::ImportFrom(ast::StmtImportFrom {
Stmt::ImportFrom(ast::StmtImportFrom {
module,
names,
level,
range: _,
}) => {
let level = level.map_or(0, |level| level.to_usize());
let module = if let Some(module) = module {
@@ -60,10 +61,10 @@ fn extract_import_map(path: &Path, package: Option<&Path>, blocks: &[&Block]) ->
Cow::Owned(module_path[..module_path.len() - level].join("."))
};
module_imports.extend(names.iter().map(|name| {
ModuleImport::new(format!("{}.{}", module, name.node.name), name.range())
ModuleImport::new(format!("{}.{}", module, name.name), name.range())
}));
}
_ => panic!("Expected StmtKind::Import | StmtKind::ImportFrom"),
_ => panic!("Expected Stmt::Import | Stmt::ImportFrom"),
}
}

View File

@@ -38,19 +38,11 @@ pub(crate) fn check_logical_lines(
) -> Vec<Diagnostic> {
let mut context = LogicalLinesContext::new(settings);
#[cfg(feature = "logical_lines")]
let should_fix_missing_whitespace = settings.rules.should_fix(Rule::MissingWhitespace);
#[cfg(not(feature = "logical_lines"))]
let should_fix_missing_whitespace = false;
#[cfg(feature = "logical_lines")]
let should_fix_whitespace_before_parameters =
settings.rules.should_fix(Rule::WhitespaceBeforeParameters);
#[cfg(not(feature = "logical_lines"))]
let should_fix_whitespace_before_parameters = false;
let mut prev_line = None;
let mut prev_indent_level = None;
let indent_char = stylist.indentation().as_char();
@@ -65,17 +57,18 @@ pub(crate) fn check_logical_lines(
if line
.flags()
.contains(TokenFlags::OPERATOR | TokenFlags::PUNCTUATION)
.intersects(TokenFlags::OPERATOR | TokenFlags::BRACKET | TokenFlags::PUNCTUATION)
{
extraneous_whitespace(&line, &mut context);
}
if line.flags().contains(TokenFlags::KEYWORD) {
whitespace_around_keywords(&line, &mut context);
missing_whitespace_after_keyword(&line, &mut context);
}
if line.flags().contains(TokenFlags::COMMENT) {
whitespace_before_comment(&line, locator, prev_line.is_none(), &mut context);
whitespace_before_comment(&line, locator, &mut context);
}
if line.flags().contains(TokenFlags::BRACKET) {
@@ -154,97 +147,3 @@ impl<'a> LogicalLinesContext<'a> {
}
}
}
#[cfg(test)]
mod tests {
use rustpython_parser::lexer::LexResult;
use rustpython_parser::{lexer, Mode};
use crate::rules::pycodestyle::rules::logical_lines::LogicalLines;
use ruff_python_ast::source_code::Locator;
#[test]
fn split_logical_lines() {
let contents = r#"
x = 1
y = 2
z = x + 1"#;
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let locator = Locator::new(contents);
let actual: Vec<String> = LogicalLines::from_tokens(&lxr, &locator)
.into_iter()
.map(|line| line.text_trimmed().to_string())
.collect();
let expected = vec![
"x = 1".to_string(),
"y = 2".to_string(),
"z = x + 1".to_string(),
];
assert_eq!(actual, expected);
let contents = r#"
x = [
1,
2,
3,
]
y = 2
z = x + 1"#;
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let locator = Locator::new(contents);
let actual: Vec<String> = LogicalLines::from_tokens(&lxr, &locator)
.into_iter()
.map(|line| line.text_trimmed().to_string())
.collect();
let expected = vec![
"x = [\n 1,\n 2,\n 3,\n]".to_string(),
"y = 2".to_string(),
"z = x + 1".to_string(),
];
assert_eq!(actual, expected);
let contents = "x = 'abc'";
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let locator = Locator::new(contents);
let actual: Vec<String> = LogicalLines::from_tokens(&lxr, &locator)
.into_iter()
.map(|line| line.text_trimmed().to_string())
.collect();
let expected = vec!["x = 'abc'".to_string()];
assert_eq!(actual, expected);
let contents = r#"
def f():
x = 1
f()"#;
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let locator = Locator::new(contents);
let actual: Vec<String> = LogicalLines::from_tokens(&lxr, &locator)
.into_iter()
.map(|line| line.text_trimmed().to_string())
.collect();
let expected = vec!["def f():", "x = 1", "f()"];
assert_eq!(actual, expected);
let contents = r#"
def f():
"""Docstring goes here."""
# Comment goes here.
x = 1
f()"#;
let lxr: Vec<LexResult> = lexer::lex(contents, Mode::Module).collect();
let locator = Locator::new(contents);
let actual: Vec<String> = LogicalLines::from_tokens(&lxr, &locator)
.into_iter()
.map(|line| line.text_trimmed().to_string())
.collect();
let expected = vec![
"def f():",
"\"\"\"Docstring goes here.\"\"\"",
"",
"x = 1",
"f()",
];
assert_eq!(actual, expected);
}
}
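
A small, self-contained sketch (not part of the diff) of the semantics behind the `contains` → `intersects` change in `check_logical_lines` above: with `bitflags`, `contains` requires *every* flag in the argument to be set, while `intersects` requires *at least one*. The stand-in `TokenFlags` definition below is hypothetical and only mirrors the flag names used in the diff; the real type lives in the pycodestyle logical-lines module.

```rust
// Stand-in flags for illustration only.
use bitflags::bitflags;

bitflags! {
    struct TokenFlags: u8 {
        const OPERATOR = 0b001;
        const BRACKET = 0b010;
        const PUNCTUATION = 0b100;
    }
}

fn main() {
    // A logical line that contains an operator but no bracket or punctuation.
    let line_flags = TokenFlags::OPERATOR;

    // `contains` asks for *all* of the requested flags, so this is false...
    assert!(!line_flags.contains(TokenFlags::OPERATOR | TokenFlags::PUNCTUATION));

    // ...while `intersects` asks for *any* of them, so operator-only lines are
    // now checked for extraneous whitespace as well.
    assert!(line_flags.intersects(
        TokenFlags::OPERATOR | TokenFlags::BRACKET | TokenFlags::PUNCTUATION
    ));
}
```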

View File

@@ -1,7 +1,6 @@
pub(crate) mod ast;
pub(crate) mod filesystem;
pub(crate) mod imports;
#[cfg(feature = "logical_lines")]
pub(crate) mod logical_lines;
pub(crate) mod noqa;
pub(crate) mod physical_lines;

View File

@@ -55,7 +55,6 @@ pub(crate) fn check_physical_lines(
let mut commented_lines_iter = indexer.comment_ranges().iter().peekable();
let mut doc_lines_iter = doc_lines.iter().peekable();
let string_lines = indexer.triple_quoted_string_ranges();
for (index, line) in locator.contents().universal_newlines().enumerate() {
while commented_lines_iter
@@ -151,7 +150,7 @@ pub(crate) fn check_physical_lines(
}
if enforce_tab_indentation {
if let Some(diagnostic) = tab_indentation(&line, string_lines) {
if let Some(diagnostic) = tab_indentation(&line, indexer) {
diagnostics.push(diagnostic);
}
}

View File

@@ -7,8 +7,8 @@ use crate::lex::docstring_detection::StateMachine;
use crate::registry::{AsRule, Rule};
use crate::rules::ruff::rules::Context;
use crate::rules::{
eradicate, flake8_commas, flake8_implicit_str_concat, flake8_pyi, flake8_quotes, pycodestyle,
pylint, pyupgrade, ruff,
eradicate, flake8_commas, flake8_implicit_str_concat, flake8_pyi, flake8_quotes, flake8_todos,
pycodestyle, pylint, pyupgrade, ruff,
};
use crate::settings::Settings;
use ruff_diagnostics::Diagnostic;
@@ -59,6 +59,15 @@ pub(crate) fn check_tokens(
]);
let enforce_extraneous_parenthesis = settings.rules.enabled(Rule::ExtraneousParentheses);
let enforce_type_comment_in_stub = settings.rules.enabled(Rule::TypeCommentInStub);
let enforce_todos = settings.rules.any_enabled(&[
Rule::InvalidTodoTag,
Rule::MissingTodoAuthor,
Rule::MissingTodoLink,
Rule::MissingTodoColon,
Rule::MissingTodoDescription,
Rule::InvalidTodoCapitalization,
Rule::MissingSpaceAfterTodoColon,
]);
// RUF001, RUF002, RUF003
if enforce_ambiguous_unicode_character {
@@ -179,5 +188,14 @@ pub(crate) fn check_tokens(
diagnostics.extend(flake8_pyi::rules::type_comment_in_stub(tokens));
}
// TD001, TD002, TD003, TD004, TD005, TD006, TD007
if enforce_todos {
diagnostics.extend(
flake8_todos::rules::todos(tokens, settings)
.into_iter()
.filter(|diagnostic| settings.rules.enabled(diagnostic.kind.rule())),
);
}
diagnostics
}

File diff suppressed because it is too large

View File

@@ -1,7 +1,7 @@
use libcst_native::{Expression, NameOrAttribute};
fn compose_call_path_inner<'a>(expr: &'a Expression, parts: &mut Vec<&'a str>) {
match &expr {
match expr {
Expression::Call(expr) => {
compose_call_path_inner(&expr.func, parts);
}

View File

@@ -1,7 +1,8 @@
use anyhow::{bail, Result};
use libcst_native::{
Attribute, Call, Comparison, Dict, Expr, Expression, Import, ImportAlias, ImportFrom,
ImportNames, Module, SimpleString, SmallStatement, Statement,
Attribute, Call, Comparison, Dict, Expr, Expression, FormattedString, FormattedStringContent,
FormattedStringExpression, Import, ImportAlias, ImportFrom, ImportNames, Module, Name,
SimpleString, SmallStatement, Statement,
};
pub(crate) fn match_module(module_text: &str) -> Result<Module> {
@@ -14,7 +15,7 @@ pub(crate) fn match_module(module_text: &str) -> Result<Module> {
pub(crate) fn match_expression(expression_text: &str) -> Result<Expression> {
match libcst_native::parse_expression(expression_text) {
Ok(expression) => Ok(expression),
Err(_) => bail!("Failed to extract CST from source"),
Err(_) => bail!("Failed to extract expression from source"),
}
}
@@ -111,3 +112,33 @@ pub(crate) fn match_simple_string<'a, 'b>(
bail!("Expected Expression::SimpleString")
}
}
pub(crate) fn match_formatted_string<'a, 'b>(
expression: &'a mut Expression<'b>,
) -> Result<&'a mut FormattedString<'b>> {
if let Expression::FormattedString(formatted_string) = expression {
Ok(formatted_string)
} else {
bail!("Expected Expression::FormattedString")
}
}
pub(crate) fn match_formatted_string_expression<'a, 'b>(
formatted_string_content: &'a mut FormattedStringContent<'b>,
) -> Result<&'a mut FormattedStringExpression<'b>> {
if let FormattedStringContent::Expression(formatted_string_expression) =
formatted_string_content
{
Ok(formatted_string_expression)
} else {
bail!("Expected FormattedStringContent::Expression")
}
}
pub(crate) fn match_name<'a, 'b>(expression: &'a mut Expression<'b>) -> Result<&'a mut Name<'b>> {
if let Expression::Name(name) = expression {
Ok(name)
} else {
bail!("Expected Expression::Name")
}
}
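
A hypothetical within-crate sketch of how the new f-string matchers above might be chained: parse an expression, drill into the first `{...}` part of an f-string, rename the inner `Name`, and re-generate the source. The `parts` and `expression` field names follow libcst's node layout and are assumptions here, as is the `rename_first_fstring_expr` helper itself.

```rust
// Hypothetical helper, assuming the matcher signatures shown above and
// libcst_native's `FormattedString { parts, .. }` /
// `FormattedStringExpression { expression, .. }` node layout.
use anyhow::Result;
use libcst_native::{Codegen, CodegenState};

fn rename_first_fstring_expr(source: &str) -> Result<String> {
    // e.g. source = r#"f"{old}""#
    let mut expression = match_expression(source)?;
    let formatted_string = match_formatted_string(&mut expression)?;
    if let Some(part) = formatted_string.parts.first_mut() {
        let inner = match_formatted_string_expression(part)?;
        let name = match_name(&mut inner.expression)?;
        name.value = "new";
    }
    // Re-generate the (possibly modified) expression as source text.
    let mut state = CodegenState::default();
    expression.codegen(&mut state);
    Ok(state.to_string())
}
```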

View File

@@ -1,12 +1,13 @@
//! Extract `# noqa` and `# isort: skip` directives from tokenized source.
use crate::noqa::NoqaMapping;
use bitflags::bitflags;
use ruff_python_ast::source_code::{Indexer, Locator};
use ruff_text_size::{TextLen, TextRange, TextSize};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
use ruff_python_ast::source_code::{Indexer, Locator};
use crate::noqa::NoqaMapping;
use crate::settings::Settings;
bitflags! {
@@ -102,7 +103,10 @@ pub fn extract_noqa_line_for(
..
} => {
if locator.contains_line_break(*range) {
string_mappings.push(*range);
string_mappings.push(TextRange::new(
locator.line_start(range.start()),
range.end(),
));
}
}
@@ -219,11 +223,12 @@ pub fn extract_isort_directives(lxr: &[LexResult], locator: &Locator) -> IsortDi
#[cfg(test)]
mod tests {
use ruff_python_ast::source_code::{Indexer, Locator};
use ruff_text_size::{TextLen, TextRange, TextSize};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::{lexer, Mode};
use ruff_python_ast::source_code::{Indexer, Locator};
use crate::directives::{extract_isort_directives, extract_noqa_line_for};
use crate::noqa::NoqaMapping;
@@ -271,7 +276,7 @@ y = 2
z = x + 1";
assert_eq!(
noqa_mappings(contents),
NoqaMapping::from_iter([TextRange::new(TextSize::from(4), TextSize::from(22)),])
NoqaMapping::from_iter([TextRange::new(TextSize::from(0), TextSize::from(22)),])
);
let contents = "x = 1
@@ -282,7 +287,7 @@ ghi
z = 2";
assert_eq!(
noqa_mappings(contents),
NoqaMapping::from_iter([TextRange::new(TextSize::from(10), TextSize::from(28))])
NoqaMapping::from_iter([TextRange::new(TextSize::from(6), TextSize::from(28))])
);
let contents = "x = 1
@@ -292,7 +297,7 @@ ghi
'''";
assert_eq!(
noqa_mappings(contents),
NoqaMapping::from_iter([TextRange::new(TextSize::from(10), TextSize::from(28))])
NoqaMapping::from_iter([TextRange::new(TextSize::from(6), TextSize::from(28))])
);
let contents = r#"x = \

View File

@@ -3,8 +3,8 @@
use std::iter::FusedIterator;
use ruff_text_size::{TextRange, TextSize};
use rustpython_parser::ast::{self, Constant, ExprKind, Stmt, StmtKind, Suite};
use ruff_text_size::TextSize;
use rustpython_parser::ast::{self, Constant, Expr, Ranged, Stmt, Suite};
use rustpython_parser::lexer::LexResult;
use rustpython_parser::Tok;
@@ -13,24 +13,19 @@ use ruff_python_ast::source_code::Locator;
use ruff_python_ast::statement_visitor::{walk_stmt, StatementVisitor};
/// Extract doc lines (standalone comments) from a token sequence.
pub(crate) fn doc_lines_from_tokens<'a>(
lxr: &'a [LexResult],
locator: &'a Locator<'a>,
) -> DocLines<'a> {
DocLines::new(lxr, locator)
pub(crate) fn doc_lines_from_tokens(lxr: &[LexResult]) -> DocLines {
DocLines::new(lxr)
}
pub(crate) struct DocLines<'a> {
inner: std::iter::Flatten<core::slice::Iter<'a, LexResult>>,
locator: &'a Locator<'a>,
prev: TextSize,
}
impl<'a> DocLines<'a> {
fn new(lxr: &'a [LexResult], locator: &'a Locator) -> Self {
fn new(lxr: &'a [LexResult]) -> Self {
Self {
inner: lxr.iter().flatten(),
locator,
prev: TextSize::default(),
}
}
@@ -46,15 +41,11 @@ impl Iterator for DocLines<'_> {
match tok {
Tok::Comment(..) => {
if at_start_of_line
|| self
.locator
.contains_line_break(TextRange::new(self.prev, range.start()))
{
if at_start_of_line {
break Some(range.start());
}
}
Tok::Newline => {
Tok::Newline | Tok::NonLogicalNewline => {
at_start_of_line = true;
}
Tok::Indent | Tok::Dedent => {
@@ -79,11 +70,11 @@ struct StringLinesVisitor<'a> {
impl StatementVisitor<'_> for StringLinesVisitor<'_> {
fn visit_stmt(&mut self, stmt: &Stmt) {
if let StmtKind::Expr(ast::StmtExpr { value }) = &stmt.node {
if let ExprKind::Constant(ast::ExprConstant {
if let Stmt::Expr(ast::StmtExpr { value, range: _ }) = stmt {
if let Expr::Constant(ast::ExprConstant {
value: Constant::Str(..),
..
}) = &value.node
}) = value.as_ref()
{
for line in UniversalNewlineIterator::with_offset(
self.locator.slice(value.range()),
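
A minimal within-crate sketch (not part of the diff) of driving the simplified, token-only `doc_lines_from_tokens` above; `standalone_comment_offsets` is a hypothetical helper name.

```rust
// Hypothetical helper: collect the start offsets of standalone comments from a
// source string, assuming the `doc_lines_from_tokens(&[LexResult]) -> DocLines`
// signature introduced above.
use ruff_text_size::TextSize;
use rustpython_parser::lexer::LexResult;

fn standalone_comment_offsets(contents: &str) -> Vec<TextSize> {
    let tokens: Vec<LexResult> = ruff_rustpython::tokenize(contents);
    doc_lines_from_tokens(&tokens).collect()
}
```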

View File

@@ -1,6 +1,6 @@
//! Extract docstrings from an AST.
use rustpython_parser::ast::{self, Constant, Expr, ExprKind, Stmt, StmtKind};
use rustpython_parser::ast::{self, Constant, Expr, Stmt};
use ruff_python_semantic::definition::{Definition, DefinitionId, Definitions, Member, MemberKind};
@@ -8,13 +8,13 @@ use ruff_python_semantic::definition::{Definition, DefinitionId, Definitions, Me
pub(crate) fn docstring_from(suite: &[Stmt]) -> Option<&Expr> {
let stmt = suite.first()?;
// Require the docstring to be a standalone expression.
let StmtKind::Expr(ast::StmtExpr { value }) = &stmt.node else {
let Stmt::Expr(ast::StmtExpr { value, range: _ }) = stmt else {
return None;
};
// Only match strings.
if !matches!(
&value.node,
ExprKind::Constant(ast::ExprConstant {
value.as_ref(),
Expr::Constant(ast::ExprConstant {
value: Constant::Str(_),
..
})
@@ -29,10 +29,9 @@ pub(crate) fn extract_docstring<'a>(definition: &'a Definition<'a>) -> Option<&'
match definition {
Definition::Module(module) => docstring_from(module.python_ast),
Definition::Member(member) => {
if let StmtKind::ClassDef(ast::StmtClassDef { body, .. })
| StmtKind::FunctionDef(ast::StmtFunctionDef { body, .. })
| StmtKind::AsyncFunctionDef(ast::StmtAsyncFunctionDef { body, .. }) =
&member.stmt.node
if let Stmt::ClassDef(ast::StmtClassDef { body, .. })
| Stmt::FunctionDef(ast::StmtFunctionDef { body, .. })
| Stmt::AsyncFunctionDef(ast::StmtAsyncFunctionDef { body, .. }) = &member.stmt
{
docstring_from(body)
} else {
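
A within-crate sketch (not part of the diff) showing how the updated `docstring_from` might be used to pull a module docstring out of source text; `module_docstring` is a hypothetical helper.

```rust
// Hypothetical helper, assuming `docstring_from(&[Stmt]) -> Option<&Expr>` as above.
use ruff_python_ast::source_code::Locator;
use rustpython_parser as parser;
use rustpython_parser::ast::Ranged;

fn module_docstring(contents: &str) -> Option<String> {
    let program = parser::parse_program(contents, "<filename>").ok()?;
    let expr = docstring_from(&program)?;
    // Slice the original source at the docstring expression's range.
    Some(Locator::new(contents).slice(expr.range()).to_string())
}
```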

View File

@@ -2,7 +2,7 @@ use std::fmt::{Debug, Formatter};
use std::ops::Deref;
use ruff_text_size::{TextRange, TextSize};
use rustpython_parser::ast::Expr;
use rustpython_parser::ast::{Expr, Ranged};
use ruff_python_semantic::definition::Definition;
@@ -29,15 +29,15 @@ impl<'a> Docstring<'a> {
DocstringBody { docstring: self }
}
pub(crate) const fn start(&self) -> TextSize {
pub(crate) fn start(&self) -> TextSize {
self.expr.start()
}
pub(crate) const fn end(&self) -> TextSize {
pub(crate) fn end(&self) -> TextSize {
self.expr.end()
}
pub(crate) const fn range(&self) -> TextRange {
pub(crate) fn range(&self) -> TextRange {
self.expr.range()
}

View File

@@ -1,365 +0,0 @@
//! Add and modify import statements to make module members available during fix execution.
use anyhow::Result;
use libcst_native::{Codegen, CodegenState, ImportAlias, Name, NameOrAttribute};
use ruff_text_size::TextSize;
use rustpython_parser::ast::{self, Stmt, StmtKind, Suite};
use rustpython_parser::{lexer, Mode, Tok};
use ruff_diagnostics::Edit;
use ruff_python_ast::helpers::is_docstring_stmt;
use ruff_python_ast::imports::AnyImport;
use ruff_python_ast::source_code::{Locator, Stylist};
use crate::cst::matchers::{match_aliases, match_import_from, match_module};
pub struct Importer<'a> {
python_ast: &'a Suite,
locator: &'a Locator<'a>,
stylist: &'a Stylist<'a>,
ordered_imports: Vec<&'a Stmt>,
}
impl<'a> Importer<'a> {
pub fn new(python_ast: &'a Suite, locator: &'a Locator<'a>, stylist: &'a Stylist<'a>) -> Self {
Self {
python_ast,
locator,
stylist,
ordered_imports: Vec::default(),
}
}
/// Visit a top-level import statement.
pub fn visit_import(&mut self, import: &'a Stmt) {
self.ordered_imports.push(import);
}
/// Return the import statement that precedes the given position, if any.
fn preceding_import(&self, at: TextSize) -> Option<&Stmt> {
self.ordered_imports
.partition_point(|stmt| stmt.start() < at)
.checked_sub(1)
.map(|idx| self.ordered_imports[idx])
}
/// Add an import statement to import the given module.
///
/// If there are no existing imports, the new import will be added at the top
/// of the file. Otherwise, it will be added after the most recent top-level
/// import statement.
pub fn add_import(&self, import: &AnyImport, at: TextSize) -> Edit {
let required_import = import.to_string();
if let Some(stmt) = self.preceding_import(at) {
// Insert after the last top-level import.
let Insertion {
prefix,
location,
suffix,
} = end_of_statement_insertion(stmt, self.locator, self.stylist);
let content = format!("{prefix}{required_import}{suffix}");
Edit::insertion(content, location)
} else {
// Insert at the top of the file.
let Insertion {
prefix,
location,
suffix,
} = top_of_file_insertion(self.python_ast, self.locator, self.stylist);
let content = format!("{prefix}{required_import}{suffix}");
Edit::insertion(content, location)
}
}
/// Return the top-level [`Stmt`] that imports the given module using `StmtKind::ImportFrom`
/// preceding the given position, if any.
pub fn find_import_from(&self, module: &str, at: TextSize) -> Option<&Stmt> {
let mut import_from = None;
for stmt in &self.ordered_imports {
if stmt.start() >= at {
break;
}
if let StmtKind::ImportFrom(ast::StmtImportFrom {
module: name,
level,
..
}) = &stmt.node
{
if level.map_or(true, |level| level.to_u32() == 0)
&& name.as_ref().map_or(false, |name| name == module)
{
import_from = Some(*stmt);
}
}
}
import_from
}
/// Add the given member to an existing `StmtKind::ImportFrom` statement.
pub fn add_member(&self, stmt: &Stmt, member: &str) -> Result<Edit> {
let mut tree = match_module(self.locator.slice(stmt.range()))?;
let import_from = match_import_from(&mut tree)?;
let aliases = match_aliases(import_from)?;
aliases.push(ImportAlias {
name: NameOrAttribute::N(Box::new(Name {
value: member,
lpar: vec![],
rpar: vec![],
})),
asname: None,
comma: aliases.last().and_then(|alias| alias.comma.clone()),
});
let mut state = CodegenState {
default_newline: &self.stylist.line_ending(),
default_indent: self.stylist.indentation(),
..CodegenState::default()
};
tree.codegen(&mut state);
Ok(Edit::range_replacement(state.to_string(), stmt.range()))
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
struct Insertion {
/// The content to add before the insertion.
prefix: &'static str,
/// The location at which to insert.
location: TextSize,
/// The content to add after the insertion.
suffix: &'static str,
}
impl Insertion {
fn new(prefix: &'static str, location: TextSize, suffix: &'static str) -> Self {
Self {
prefix,
location,
suffix,
}
}
}
/// Find the end of the last docstring.
fn match_docstring_end(body: &[Stmt]) -> Option<TextSize> {
let mut iter = body.iter();
let Some(mut stmt) = iter.next() else {
return None;
};
if !is_docstring_stmt(stmt) {
return None;
}
for next in iter {
if !is_docstring_stmt(next) {
break;
}
stmt = next;
}
Some(stmt.end())
}
/// Find the location at which an "end-of-statement" import should be inserted,
/// along with a prefix and suffix to use for the insertion.
///
/// For example, given the following code:
///
/// ```python
/// """Hello, world!"""
///
/// import os
/// import math
///
///
/// def foo():
/// pass
/// ```
///
/// The location returned will be the start of new line after the last
/// import statement, which in this case is the line after `import math`,
/// along with a trailing newline suffix.
fn end_of_statement_insertion(stmt: &Stmt, locator: &Locator, stylist: &Stylist) -> Insertion {
let location = stmt.end();
let mut tokens =
lexer::lex_starts_at(locator.after(location), Mode::Module, location).flatten();
if let Some((Tok::Semi, range)) = tokens.next() {
// If the first token after the docstring is a semicolon, insert after the semicolon as an
// inline statement;
Insertion::new(" ", range.end(), ";")
} else {
// Otherwise, insert on the next line.
Insertion::new(
"",
locator.full_line_end(location),
stylist.line_ending().as_str(),
)
}
}
/// Find the location at which a "top-of-file" import should be inserted,
/// along with a prefix and suffix to use for the insertion.
///
/// For example, given the following code:
///
/// ```python
/// """Hello, world!"""
///
/// import os
/// ```
///
/// The location returned will be the start of the `import os` statement,
/// along with a trailing newline suffix.
fn top_of_file_insertion(body: &[Stmt], locator: &Locator, stylist: &Stylist) -> Insertion {
// Skip over any docstrings.
let mut location = if let Some(location) = match_docstring_end(body) {
// If the first token after the docstring is a semicolon, insert after the semicolon as an
// inline statement;
let first_token = lexer::lex_starts_at(locator.after(location), Mode::Module, location)
.flatten()
.next();
if let Some((Tok::Semi, range)) = first_token {
return Insertion::new(" ", range.end(), ";");
}
// Otherwise, advance to the next row.
locator.full_line_end(location)
} else {
TextSize::default()
};
// Skip over any comments and empty lines.
for (tok, range) in
lexer::lex_starts_at(locator.after(location), Mode::Module, location).flatten()
{
if matches!(tok, Tok::Comment(..) | Tok::Newline) {
location = locator.full_line_end(range.end());
} else {
break;
}
}
return Insertion::new("", location, stylist.line_ending().as_str());
}
#[cfg(test)]
mod tests {
use anyhow::Result;
use ruff_text_size::TextSize;
use rustpython_parser as parser;
use rustpython_parser::lexer::LexResult;
use ruff_python_ast::newlines::LineEnding;
use ruff_python_ast::source_code::{Locator, Stylist};
use crate::importer::{top_of_file_insertion, Insertion};
fn insert(contents: &str) -> Result<Insertion> {
let program = parser::parse_program(contents, "<filename>")?;
let tokens: Vec<LexResult> = ruff_rustpython::tokenize(contents);
let locator = Locator::new(contents);
let stylist = Stylist::from_tokens(&tokens, &locator);
Ok(top_of_file_insertion(&program, &locator, &stylist))
}
#[test]
fn top_of_file_insertions() -> Result<()> {
let contents = "";
assert_eq!(
insert(contents)?,
Insertion::new("", TextSize::from(0), LineEnding::default().as_str())
);
let contents = r#"
"""Hello, world!""""#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", TextSize::from(19), LineEnding::default().as_str())
);
let contents = r#"
"""Hello, world!"""
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", TextSize::from(20), "\n")
);
let contents = r#"
"""Hello, world!"""
"""Hello, world!"""
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", TextSize::from(40), "\n")
);
let contents = r#"
x = 1
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", TextSize::from(0), "\n")
);
let contents = r#"
#!/usr/bin/env python3
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", TextSize::from(23), "\n")
);
let contents = r#"
#!/usr/bin/env python3
"""Hello, world!"""
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", TextSize::from(43), "\n")
);
let contents = r#"
"""Hello, world!"""
#!/usr/bin/env python3
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", TextSize::from(43), "\n")
);
let contents = r#"
"""%s""" % "Hello, world!"
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", TextSize::from(0), "\n")
);
let contents = r#"
"""Hello, world!"""; x = 1
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new(" ", TextSize::from(20), ";")
);
let contents = r#"
"""Hello, world!"""; x = 1; y = \
2
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new(" ", TextSize::from(20), ";")
);
Ok(())
}
}

View File

@@ -0,0 +1,261 @@
use ruff_diagnostics::Edit;
use ruff_text_size::TextSize;
use rustpython_parser::ast::{Ranged, Stmt};
use rustpython_parser::{lexer, Mode, Tok};
use ruff_python_ast::helpers::is_docstring_stmt;
use ruff_python_ast::source_code::{Locator, Stylist};
#[derive(Debug, Clone, PartialEq, Eq)]
pub(super) struct Insertion {
/// The content to add before the insertion.
prefix: &'static str,
/// The location at which to insert.
location: TextSize,
/// The content to add after the insertion.
suffix: &'static str,
}
impl Insertion {
/// Create an [`Insertion`] to insert (e.g.) an import after the end of the given [`Stmt`],
/// along with a prefix and suffix to use for the insertion.
///
/// For example, given the following code:
///
/// ```python
/// """Hello, world!"""
///
/// import os
/// import math
///
///
/// def foo():
/// pass
/// ```
///
/// The insertion returned will begin after the newline after the last import statement, which
/// in this case is the line after `import math`, and will include a trailing newline suffix.
pub(super) fn end_of_statement(stmt: &Stmt, locator: &Locator, stylist: &Stylist) -> Insertion {
let location = stmt.end();
let mut tokens =
lexer::lex_starts_at(locator.after(location), Mode::Module, location).flatten();
if let Some((Tok::Semi, range)) = tokens.next() {
// If the first token after the docstring is a semicolon, insert after the semicolon as an
// inline statement;
Insertion::new(" ", range.end(), ";")
} else {
// Otherwise, insert on the next line.
Insertion::new(
"",
locator.full_line_end(location),
stylist.line_ending().as_str(),
)
}
}
/// Create an [`Insertion`] to insert (e.g.) an import statement at the "top" of a given file,
/// along with a prefix and suffix to use for the insertion.
///
/// For example, given the following code:
///
/// ```python
/// """Hello, world!"""
///
/// import os
/// ```
///
/// The insertion returned will begin at the start of the `import os` statement, and will
/// include a trailing newline suffix.
pub(super) fn top_of_file(body: &[Stmt], locator: &Locator, stylist: &Stylist) -> Insertion {
// Skip over any docstrings.
let mut location = if let Some(location) = match_docstring_end(body) {
// If the first token after the docstring is a semicolon, insert after the semicolon as an
// inline statement;
let first_token = lexer::lex_starts_at(locator.after(location), Mode::Module, location)
.flatten()
.next();
if let Some((Tok::Semi, range)) = first_token {
return Insertion::new(" ", range.end(), ";");
}
// Otherwise, advance to the next row.
locator.full_line_end(location)
} else {
TextSize::default()
};
// Skip over any comments and empty lines.
for (tok, range) in
lexer::lex_starts_at(locator.after(location), Mode::Module, location).flatten()
{
if matches!(tok, Tok::Comment(..) | Tok::Newline) {
location = locator.full_line_end(range.end());
} else {
break;
}
}
Insertion::new("", location, stylist.line_ending().as_str())
}
fn new(prefix: &'static str, location: TextSize, suffix: &'static str) -> Self {
Self {
prefix,
location,
suffix,
}
}
/// Convert this [`Insertion`] into an [`Edit`] that inserts the given content.
pub(super) fn into_edit(self, content: &str) -> Edit {
let Insertion {
prefix,
location,
suffix,
} = self;
Edit::insertion(format!("{prefix}{content}{suffix}"), location)
}
}
/// Find the end of the last docstring.
fn match_docstring_end(body: &[Stmt]) -> Option<TextSize> {
let mut iter = body.iter();
let Some(mut stmt) = iter.next() else {
return None;
};
if !is_docstring_stmt(stmt) {
return None;
}
for next in iter {
if !is_docstring_stmt(next) {
break;
}
stmt = next;
}
Some(stmt.end())
}
#[cfg(test)]
mod tests {
use anyhow::Result;
use ruff_text_size::TextSize;
use rustpython_parser as parser;
use rustpython_parser::lexer::LexResult;
use ruff_python_ast::newlines::LineEnding;
use ruff_python_ast::source_code::{Locator, Stylist};
use super::Insertion;
fn insert(contents: &str) -> Result<Insertion> {
let program = parser::parse_program(contents, "<filename>")?;
let tokens: Vec<LexResult> = ruff_rustpython::tokenize(contents);
let locator = Locator::new(contents);
let stylist = Stylist::from_tokens(&tokens, &locator);
Ok(Insertion::top_of_file(&program, &locator, &stylist))
}
#[test]
fn top_of_file() -> Result<()> {
let contents = "";
assert_eq!(
insert(contents)?,
Insertion::new("", TextSize::from(0), LineEnding::default().as_str())
);
let contents = r#"
"""Hello, world!""""#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", TextSize::from(19), LineEnding::default().as_str())
);
let contents = r#"
"""Hello, world!"""
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", TextSize::from(20), "\n")
);
let contents = r#"
"""Hello, world!"""
"""Hello, world!"""
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", TextSize::from(40), "\n")
);
let contents = r#"
x = 1
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", TextSize::from(0), "\n")
);
let contents = r#"
#!/usr/bin/env python3
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", TextSize::from(23), "\n")
);
let contents = r#"
#!/usr/bin/env python3
"""Hello, world!"""
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", TextSize::from(43), "\n")
);
let contents = r#"
"""Hello, world!"""
#!/usr/bin/env python3
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", TextSize::from(43), "\n")
);
let contents = r#"
"""%s""" % "Hello, world!"
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new("", TextSize::from(0), "\n")
);
let contents = r#"
"""Hello, world!"""; x = 1
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new(" ", TextSize::from(20), ";")
);
let contents = r#"
"""Hello, world!"""; x = 1; y = \
2
"#
.trim_start();
assert_eq!(
insert(contents)?,
Insertion::new(" ", TextSize::from(20), ";")
);
Ok(())
}
}
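
The tests above only exercise `Insertion::top_of_file`; here is a hypothetical companion sketch for the `end_of_statement` + `into_edit` path, assuming within-crate visibility.

```rust
// Hypothetical helper: build an Edit that inserts `import os` directly after an
// existing statement, using the `Insertion` API introduced above.
use ruff_diagnostics::Edit;
use ruff_python_ast::source_code::{Locator, Stylist};
use rustpython_parser::ast::Stmt;

fn import_os_after(stmt: &Stmt, locator: &Locator, stylist: &Stylist) -> Edit {
    Insertion::end_of_statement(stmt, locator, stylist).into_edit("import os")
}
```

Keeping the prefix/suffix logic inside `Insertion` means the semicolon-vs-newline decision shown above is made in one place, and `into_edit` only has to splice the content between them.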

View File

@@ -0,0 +1,115 @@
//! Add and modify import statements to make module members available during fix execution.
use anyhow::Result;
use libcst_native::{Codegen, CodegenState, ImportAlias, Name, NameOrAttribute};
use ruff_text_size::TextSize;
use rustpython_parser::ast::{self, Ranged, Stmt, Suite};
use ruff_diagnostics::Edit;
use ruff_python_ast::imports::AnyImport;
use ruff_python_ast::source_code::{Locator, Stylist};
use crate::cst::matchers::{match_aliases, match_import_from, match_module};
use crate::importer::insertion::Insertion;
mod insertion;
pub(crate) struct Importer<'a> {
python_ast: &'a Suite,
locator: &'a Locator<'a>,
stylist: &'a Stylist<'a>,
ordered_imports: Vec<&'a Stmt>,
}
impl<'a> Importer<'a> {
pub(crate) fn new(
python_ast: &'a Suite,
locator: &'a Locator<'a>,
stylist: &'a Stylist<'a>,
) -> Self {
Self {
python_ast,
locator,
stylist,
ordered_imports: Vec::default(),
}
}
/// Visit a top-level import statement.
pub(crate) fn visit_import(&mut self, import: &'a Stmt) {
self.ordered_imports.push(import);
}
/// Return the import statement that precedes the given position, if any.
fn preceding_import(&self, at: TextSize) -> Option<&Stmt> {
self.ordered_imports
.partition_point(|stmt| stmt.start() < at)
.checked_sub(1)
.map(|idx| self.ordered_imports[idx])
}
/// Add an import statement to import the given module.
///
/// If there are no existing imports, the new import will be added at the top
/// of the file. Otherwise, it will be added after the most recent top-level
/// import statement.
pub(crate) fn add_import(&self, import: &AnyImport, at: TextSize) -> Edit {
let required_import = import.to_string();
if let Some(stmt) = self.preceding_import(at) {
// Insert after the last top-level import.
Insertion::end_of_statement(stmt, self.locator, self.stylist)
.into_edit(&required_import)
} else {
// Insert at the top of the file.
Insertion::top_of_file(self.python_ast, self.locator, self.stylist)
.into_edit(&required_import)
}
}
/// Return the top-level [`Stmt`] that imports the given module using `Stmt::ImportFrom`
/// preceding the given position, if any.
pub(crate) fn find_import_from(&self, module: &str, at: TextSize) -> Option<&Stmt> {
let mut import_from = None;
for stmt in &self.ordered_imports {
if stmt.start() >= at {
break;
}
if let Stmt::ImportFrom(ast::StmtImportFrom {
module: name,
level,
..
}) = stmt
{
if level.map_or(true, |level| level.to_u32() == 0)
&& name.as_ref().map_or(false, |name| name == module)
{
import_from = Some(*stmt);
}
}
}
import_from
}
/// Add the given member to an existing `Stmt::ImportFrom` statement.
pub(crate) fn add_member(&self, stmt: &Stmt, member: &str) -> Result<Edit> {
let mut tree = match_module(self.locator.slice(stmt.range()))?;
let import_from = match_import_from(&mut tree)?;
let aliases = match_aliases(import_from)?;
aliases.push(ImportAlias {
name: NameOrAttribute::N(Box::new(Name {
value: member,
lpar: vec![],
rpar: vec![],
})),
asname: None,
comma: aliases.last().and_then(|alias| alias.comma.clone()),
});
let mut state = CodegenState {
default_newline: &self.stylist.line_ending(),
default_indent: self.stylist.indentation(),
..CodegenState::default()
};
tree.codegen(&mut state);
Ok(Edit::range_replacement(state.to_string(), stmt.range()))
}
}
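
A hypothetical within-crate sketch of how the refactored `Importer` above might be driven end to end. The `insert_required_import` helper and the idea of registering imports by scanning the suite are illustrative assumptions, and the `AnyImport` value is left abstract.

```rust
// Hypothetical driver, assuming the `Importer` signatures shown above.
use ruff_diagnostics::Edit;
use ruff_python_ast::imports::AnyImport;
use ruff_python_ast::source_code::{Locator, Stylist};
use ruff_text_size::TextSize;
use rustpython_parser::ast::{Stmt, Suite};

fn insert_required_import(
    python_ast: &Suite,
    locator: &Locator,
    stylist: &Stylist,
    required_import: &AnyImport,
) -> Edit {
    let mut importer = Importer::new(python_ast, locator, stylist);
    // Register every top-level import so later insertions land after the last one.
    for stmt in python_ast {
        if matches!(stmt, Stmt::Import(_) | Stmt::ImportFrom(_)) {
            importer.visit_import(stmt);
        }
    }
    // Request the import at offset zero: it lands at the top of the file when no
    // imports exist, or just after the most recent preceding top-level import.
    importer.add_import(required_import, TextSize::default())
}
```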

View File

@@ -88,7 +88,7 @@ pub fn check_path(
let use_doc_lines = settings.rules.enabled(Rule::DocLineTooLong);
let mut doc_lines = vec![];
if use_doc_lines {
doc_lines.extend(doc_lines_from_tokens(&tokens, locator));
doc_lines.extend(doc_lines_from_tokens(&tokens));
}
// Run the token-based rules.
@@ -116,7 +116,6 @@ pub fn check_path(
.iter_enabled()
.any(|rule_code| rule_code.lint_source().is_logical_lines())
{
#[cfg(feature = "logical_lines")]
diagnostics.extend(crate::checkers::logical_lines::check_logical_lines(
&tokens, locator, stylist, settings,
));

View File

@@ -200,11 +200,11 @@ struct TruncateAtNewline<'a>(&'a dyn Display);
impl Display for TruncateAtNewline<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
struct TruncateAdapter<'a> {
inner: &'a mut dyn std::fmt::Write,
inner: &'a mut dyn Write,
after_new_line: bool,
}
impl std::fmt::Write for TruncateAdapter<'_> {
impl Write for TruncateAdapter<'_> {
fn write_str(&mut self, s: &str) -> std::fmt::Result {
if self.after_new_line {
Ok(())

View File

@@ -1,6 +1,6 @@
use crate::message::{Emitter, EmitterContext, Message};
use crate::registry::AsRule;
use ruff_python_ast::source_code::{OneIndexed, SourceLocation};
use ruff_python_ast::source_code::SourceLocation;
use std::io::Write;
/// Generate error logging commands for Azure Pipelines format.
@@ -19,10 +19,7 @@ impl Emitter for AzureEmitter {
let location = if context.is_jupyter_notebook(message.filename()) {
// We can't give a reasonable location for the structured formats,
// so we show one that's clearly a fallback
SourceLocation {
row: OneIndexed::from_zero_indexed(0),
column: OneIndexed::from_zero_indexed(0),
}
SourceLocation::default()
} else {
message.compute_start_location()
};

View File

@@ -1,7 +1,7 @@
use crate::fs::relativize_path;
use crate::message::{Emitter, EmitterContext, Message};
use crate::registry::AsRule;
use ruff_python_ast::source_code::{OneIndexed, SourceLocation};
use ruff_python_ast::source_code::SourceLocation;
use std::io::Write;
/// Generate error workflow command in GitHub Actions format.
@@ -21,10 +21,7 @@ impl Emitter for GithubEmitter {
let location = if context.is_jupyter_notebook(message.filename()) {
// We can't give a reasonable location for the structured formats,
// so we show one that's clearly a fallback
SourceLocation {
row: OneIndexed::from_zero_indexed(0),
column: OneIndexed::from_zero_indexed(0),
}
SourceLocation::default()
} else {
source_location.clone()
};

View File

@@ -3,7 +3,7 @@ use crate::message::{
};
use crate::registry::AsRule;
use quick_junit::{NonSuccessKind, Report, TestCase, TestCaseStatus, TestSuite};
use ruff_python_ast::source_code::{OneIndexed, SourceLocation};
use ruff_python_ast::source_code::SourceLocation;
use std::io::Write;
use std::path::Path;
@@ -19,52 +19,60 @@ impl Emitter for JunitEmitter {
) -> anyhow::Result<()> {
let mut report = Report::new("ruff");
for (filename, messages) in group_messages_by_filename(messages) {
let mut test_suite = TestSuite::new(filename);
if messages.is_empty() {
let mut test_suite = TestSuite::new("ruff");
test_suite
.extra
.insert("package".to_string(), "org.ruff".to_string());
for message in messages {
let MessageWithLocation {
message,
start_location,
} = message;
let mut status = TestCaseStatus::non_success(NonSuccessKind::Failure);
status.set_message(message.kind.body.clone());
let location = if context.is_jupyter_notebook(message.filename()) {
// We can't give a reasonable location for the structured formats,
// so we show one that's clearly a fallback
SourceLocation {
row: OneIndexed::from_zero_indexed(0),
column: OneIndexed::from_zero_indexed(0),
}
} else {
start_location
};
status.set_description(format!(
"line {row}, col {col}, {body}",
row = location.row,
col = location.column,
body = message.kind.body
));
let mut case = TestCase::new(
format!("org.ruff.{}", message.kind.rule().noqa_code()),
status,
);
let file_path = Path::new(filename);
let file_stem = file_path.file_stem().unwrap().to_str().unwrap();
let classname = file_path.parent().unwrap().join(file_stem);
case.set_classname(classname.to_str().unwrap());
case.extra
.insert("line".to_string(), location.row.to_string());
case.extra
.insert("column".to_string(), location.column.to_string());
test_suite.add_test_case(case);
}
let mut case = TestCase::new("No errors found", TestCaseStatus::success());
case.set_classname("ruff");
test_suite.add_test_case(case);
report.add_test_suite(test_suite);
} else {
for (filename, messages) in group_messages_by_filename(messages) {
let mut test_suite = TestSuite::new(filename);
test_suite
.extra
.insert("package".to_string(), "org.ruff".to_string());
for message in messages {
let MessageWithLocation {
message,
start_location,
} = message;
let mut status = TestCaseStatus::non_success(NonSuccessKind::Failure);
status.set_message(message.kind.body.clone());
let location = if context.is_jupyter_notebook(message.filename()) {
// We can't give a reasonable location for the structured formats,
// so we show one that's clearly a fallback
SourceLocation::default()
} else {
start_location
};
status.set_description(format!(
"line {row}, col {col}, {body}",
row = location.row,
col = location.column,
body = message.kind.body
));
let mut case = TestCase::new(
format!("org.ruff.{}", message.kind.rule().noqa_code()),
status,
);
let file_path = Path::new(filename);
let file_stem = file_path.file_stem().unwrap().to_str().unwrap();
let classname = file_path.parent().unwrap().join(file_stem);
case.set_classname(classname.to_str().unwrap());
case.extra
.insert("line".to_string(), location.row.to_string());
case.extra
.insert("column".to_string(), location.column.to_string());
test_suite.add_test_case(case);
}
report.add_test_suite(test_suite);
}
}
report.serialize(writer)?;

View File

@@ -118,7 +118,7 @@ pub(super) struct RuleCodeAndBody<'a> {
}
impl Display for RuleCodeAndBody<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let kind = &self.message.kind;
if self.show_fix_status && self.message.fix.is_some() {

View File

@@ -14,67 +14,36 @@ pub use rule_set::{RuleSet, RuleSetIterator};
ruff_macros::register_rules!(
// pycodestyle errors
rules::pycodestyle::rules::MixedSpacesAndTabs,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::IndentationWithInvalidMultiple,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::NoIndentedBlock,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::UnexpectedIndentation,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::IndentationWithInvalidMultipleComment,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::NoIndentedBlockComment,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::UnexpectedIndentationComment,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::OverIndented,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::WhitespaceAfterOpenBracket,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::WhitespaceBeforeCloseBracket,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::WhitespaceBeforePunctuation,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::MultipleSpacesBeforeOperator,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::MultipleSpacesAfterOperator,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::TabBeforeOperator,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::TabAfterOperator,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::TooFewSpacesBeforeInlineComment,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::NoSpaceAfterInlineComment,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::NoSpaceAfterBlockComment,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::MultipleLeadingHashesForBlockComment,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::MultipleSpacesAfterKeyword,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::MissingWhitespace,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::MissingWhitespaceAfterKeyword,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::MultipleSpacesBeforeKeyword,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::MissingWhitespaceAroundOperator,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::MissingWhitespaceAroundArithmeticOperator,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::MissingWhitespaceAroundBitwiseOrShiftOperator,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::MissingWhitespaceAroundModuloOperator,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::TabAfterKeyword,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::UnexpectedSpacesAroundKeywordParameterEquals,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::MissingWhitespaceAroundParameterEquals,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::WhitespaceBeforeParameters,
#[cfg(feature = "logical_lines")]
rules::pycodestyle::rules::logical_lines::TabBeforeKeyword,
rules::pycodestyle::rules::MultipleImportsOnOneLine,
rules::pycodestyle::rules::ModuleImportNotAtTopOfFile,
@@ -190,6 +159,13 @@ ruff_macros::register_rules!(
rules::pylint::rules::LoggingTooManyArgs,
rules::pylint::rules::UnexpectedSpecialMethodSignature,
rules::pylint::rules::NestedMinMax,
rules::pylint::rules::DuplicateValue,
rules::pylint::rules::DuplicateBases,
rules::pylint::rules::NamedExprWithoutContext,
// flake8-async
rules::flake8_async::rules::BlockingHttpCallInAsyncFunction,
rules::flake8_async::rules::OpenSleepOrSubprocessInAsyncFunction,
rules::flake8_async::rules::BlockingOsCallInAsyncFunction,
// flake8-builtins
rules::flake8_builtins::rules::BuiltinVariableShadowing,
rules::flake8_builtins::rules::BuiltinArgumentShadowing,
@@ -289,6 +265,8 @@ ruff_macros::register_rules!(
rules::flake8_annotations::rules::MissingReturnTypeStaticMethod,
rules::flake8_annotations::rules::MissingReturnTypeClassMethod,
rules::flake8_annotations::rules::AnyType,
// flake8-future-annotations
rules::flake8_future_annotations::rules::MissingFutureAnnotationsImport,
// flake8-2020
rules::flake8_2020::rules::SysVersionSlice3,
rules::flake8_2020::rules::SysVersion2,
@@ -446,6 +424,7 @@ ruff_macros::register_rules!(
rules::flake8_bandit::rules::HardcodedTempFile,
rules::flake8_bandit::rules::HashlibInsecureHashFunction,
rules::flake8_bandit::rules::Jinja2AutoescapeFalse,
rules::flake8_bandit::rules::ParamikoCall,
rules::flake8_bandit::rules::LoggingConfigInsecureListen,
rules::flake8_bandit::rules::RequestWithNoCertValidation,
rules::flake8_bandit::rules::RequestWithoutTimeout,
@@ -533,18 +512,20 @@ ruff_macros::register_rules!(
rules::flake8_pyi::rules::AssignmentDefaultInStub,
rules::flake8_pyi::rules::BadVersionInfoComparison,
rules::flake8_pyi::rules::DocstringInStub,
rules::flake8_pyi::rules::NonEmptyStubBody,
rules::flake8_pyi::rules::PassStatementStubBody,
rules::flake8_pyi::rules::TypeCommentInStub,
rules::flake8_pyi::rules::TypedArgumentDefaultInStub,
rules::flake8_pyi::rules::UnprefixedTypeParam,
rules::flake8_pyi::rules::UnrecognizedPlatformCheck,
rules::flake8_pyi::rules::UnrecognizedPlatformName,
rules::flake8_pyi::rules::PassInClassBody,
rules::flake8_pyi::rules::DuplicateUnionMember,
rules::flake8_pyi::rules::EllipsisInNonEmptyClassBody,
rules::flake8_pyi::rules::NonEmptyStubBody,
rules::flake8_pyi::rules::PassInClassBody,
rules::flake8_pyi::rules::PassStatementStubBody,
rules::flake8_pyi::rules::QuotedAnnotationInStub,
rules::flake8_pyi::rules::SnakeCaseTypeAlias,
rules::flake8_pyi::rules::TSuffixedTypeAlias,
rules::flake8_pyi::rules::TypeCommentInStub,
rules::flake8_pyi::rules::TypedArgumentDefaultInStub,
rules::flake8_pyi::rules::UnannotatedAssignmentInStub,
rules::flake8_pyi::rules::UnprefixedTypeParam,
rules::flake8_pyi::rules::UnrecognizedPlatformCheck,
rules::flake8_pyi::rules::UnrecognizedPlatformName,
// flake8-pytest-style
rules::flake8_pytest_style::rules::PytestFixtureIncorrectParenthesesStyle,
rules::flake8_pytest_style::rules::PytestFixturePositionalArgs,
@@ -604,6 +585,7 @@ ruff_macros::register_rules!(
rules::tryceratops::rules::ReraiseNoCause,
rules::tryceratops::rules::VerboseRaise,
rules::tryceratops::rules::TryConsiderElse,
rules::tryceratops::rules::UselessTryExcept,
rules::tryceratops::rules::RaiseWithinTry,
rules::tryceratops::rules::ErrorInsteadOfException,
rules::tryceratops::rules::VerboseLogMessage,
@@ -674,6 +656,14 @@ ruff_macros::register_rules!(
rules::flake8_django::rules::DjangoNonLeadingReceiverDecorator,
// flynt
rules::flynt::rules::StaticJoinToFString,
// flake8-todo
rules::flake8_todos::rules::InvalidTodoTag,
rules::flake8_todos::rules::MissingTodoAuthor,
rules::flake8_todos::rules::MissingTodoLink,
rules::flake8_todos::rules::MissingTodoColon,
rules::flake8_todos::rules::MissingTodoDescription,
rules::flake8_todos::rules::InvalidTodoCapitalization,
rules::flake8_todos::rules::MissingSpaceAfterTodoColon,
);
pub trait AsRule {
@@ -724,6 +714,9 @@ pub enum Linter {
/// [flake8-annotations](https://pypi.org/project/flake8-annotations/)
#[prefix = "ANN"]
Flake8Annotations,
/// [flake8-async](https://pypi.org/project/flake8-async/)
#[prefix = "ASYNC"]
Flake8Async,
/// [flake8-bandit](https://pypi.org/project/flake8-bandit/)
#[prefix = "S"]
Flake8Bandit,
@@ -760,6 +753,9 @@ pub enum Linter {
/// [flake8-executable](https://pypi.org/project/flake8-executable/)
#[prefix = "EXE"]
Flake8Executable,
/// [flake8-future-annotations](https://pypi.org/project/flake8-future-annotations/)
#[prefix = "FA"]
Flake8FutureAnnotations,
/// [flake8-implicit-str-concat](https://pypi.org/project/flake8-implicit-str-concat/)
#[prefix = "ISC"]
Flake8ImplicitStrConcat,
@@ -814,6 +810,9 @@ pub enum Linter {
/// [flake8-use-pathlib](https://pypi.org/project/flake8-use-pathlib/)
#[prefix = "PTH"]
Flake8UsePathlib,
/// [flake8-todos](https://github.com/orsinium-labs/flake8-todos/)
#[prefix = "TD"]
Flake8Todo,
/// [eradicate](https://pypi.org/project/eradicate/)
#[prefix = "ERA"]
Eradicate,
@@ -938,11 +937,17 @@ impl Rule {
| Rule::UselessSemicolon
| Rule::MultipleStatementsOnOneLineSemicolon
| Rule::ProhibitedTrailingComma
| Rule::TypeCommentInStub => LintSource::Tokens,
| Rule::TypeCommentInStub
| Rule::InvalidTodoTag
| Rule::MissingTodoAuthor
| Rule::MissingTodoLink
| Rule::MissingTodoColon
| Rule::MissingTodoDescription
| Rule::InvalidTodoCapitalization
| Rule::MissingSpaceAfterTodoColon => LintSource::Tokens,
Rule::IOError => LintSource::Io,
Rule::UnsortedImports | Rule::MissingRequiredImport => LintSource::Imports,
Rule::ImplicitNamespacePackage | Rule::InvalidModuleName => LintSource::Filesystem,
#[cfg(feature = "logical_lines")]
Rule::IndentationWithInvalidMultiple
| Rule::IndentationWithInvalidMultipleComment
| Rule::MissingWhitespace

View File

@@ -119,8 +119,7 @@ impl Visitor<'_> for SelectorVisitor {
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str(
"expected a string code identifying a linter or specific rule, or a partial rule code \
or ALL to refer to all rules",
"expected a string code identifying a linter or specific rule, or a partial rule code or ALL to refer to all rules",
)
}
@@ -141,13 +140,22 @@ impl From<RuleCodePrefix> for RuleSelector {
}
}
/// Returns `true` if the given rule should be selected by the `RuleSelector::All` selector.
fn select_all(rule: Rule) -> bool {
// Nursery rules have to be explicitly selected, so we ignore them when looking at
// prefixes.
!rule.is_nursery()
}
impl IntoIterator for &RuleSelector {
type IntoIter = RuleSelectorIter;
type Item = Rule;
type IntoIter = RuleSelectorIter;
fn into_iter(self) -> Self::IntoIter {
match self {
RuleSelector::All => RuleSelectorIter::All(Rule::iter()),
RuleSelector::All => {
RuleSelectorIter::All(Rule::iter().filter(|rule| select_all(*rule)))
}
RuleSelector::C => RuleSelectorIter::Chain(
Linter::Flake8Comprehensions
.into_iter()
@@ -165,7 +173,7 @@ impl IntoIterator for &RuleSelector {
}
pub enum RuleSelectorIter {
All(RuleIter),
All(std::iter::Filter<RuleIter, fn(&Rule) -> bool>),
Chain(std::iter::Chain<std::vec::IntoIter<Rule>, std::vec::IntoIter<Rule>>),
Vec(std::vec::IntoIter<Rule>),
}
@@ -196,15 +204,16 @@ pub(crate) const fn prefix_to_selector(prefix: RuleCodePrefix) -> RuleSelector {
#[cfg(feature = "schemars")]
mod schema {
use crate::registry::RuleNamespace;
use crate::rule_selector::{Linter, Rule, RuleCodePrefix};
use crate::RuleSelector;
use itertools::Itertools;
use schemars::JsonSchema;
use schemars::_serde_json::Value;
use schemars::schema::{InstanceType, Schema, SchemaObject};
use schemars::JsonSchema;
use strum::IntoEnumIterator;
use crate::registry::RuleNamespace;
use crate::rule_selector::{Linter, RuleCodePrefix};
use crate::RuleSelector;
impl JsonSchema for RuleSelector {
fn schema_name() -> String {
"RuleSelector".to_string()
@@ -228,20 +237,6 @@ mod schema {
.into_iter()
.chain(
RuleCodePrefix::iter()
.filter(|p| {
// Once logical lines are active by default, please remove this.
// This is here because generate-all output otherwise depends on
// the feature sets which makes the test running with
// `--all-features` fail
!Rule::from_code(&format!(
"{}{}",
p.linter().common_prefix(),
p.short_code()
))
.unwrap()
.lint_source()
.is_logical_lines()
})
.map(|p| {
let prefix = p.linter().common_prefix();
let code = p.short_code();

View File

@@ -5,7 +5,7 @@ use rustpython_parser as parser;
static ALLOWLIST_REGEX: Lazy<Regex> = Lazy::new(|| {
Regex::new(
r"^(?i)(?:pylint|pyright|noqa|nosec|type:\s*ignore|fmt:\s*(on|off)|isort:\s*(on|off|skip|skip_file|split|dont-add-imports(:\s*\[.*?])?)|mypy:|SPDX-License-Identifier:)"
r"^(?i)(?:pylint|pyright|noqa|nosec|region|endregion|type:\s*ignore|fmt:\s*(on|off)|isort:\s*(on|off|skip|skip_file|split|dont-add-imports(:\s*\[.*?])?)|mypy:|SPDX-License-Identifier:)"
).unwrap()
});
static BRACKET_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"^[()\[\]{}\s]+$").unwrap());
@@ -224,6 +224,11 @@ mod tests {
assert!(!comment_contains_code("# noqa: A123", &[]));
assert!(!comment_contains_code("# noqa:A123", &[]));
assert!(!comment_contains_code("# nosec", &[]));
assert!(!comment_contains_code("# region", &[]));
assert!(!comment_contains_code("# endregion", &[]));
assert!(!comment_contains_code("# region.name", &[]));
assert!(!comment_contains_code("# region name", &[]));
assert!(!comment_contains_code("# region: name", &[]));
assert!(!comment_contains_code("# fmt: on", &[]));
assert!(!comment_contains_code("# fmt: off", &[]));
assert!(!comment_contains_code("# fmt:on", &[]));

View File

@@ -1,5 +1,5 @@
use num_bigint::BigInt;
use rustpython_parser::ast::{self, Attributed, Cmpop, Constant, Expr, ExprKind};
use rustpython_parser::ast::{self, Cmpop, Constant, Expr, Ranged};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
@@ -123,16 +123,17 @@ fn is_sys(checker: &Checker, expr: &Expr, target: &str) -> bool {
/// YTT101, YTT102, YTT301, YTT303
pub(crate) fn subscript(checker: &mut Checker, value: &Expr, slice: &Expr) {
if is_sys(checker, value, "version") {
match &slice.node {
ExprKind::Slice(ast::ExprSlice {
match slice {
Expr::Slice(ast::ExprSlice {
lower: None,
upper: Some(upper),
step: None,
range: _,
}) => {
if let ExprKind::Constant(ast::ExprConstant {
if let Expr::Constant(ast::ExprConstant {
value: Constant::Int(i),
..
}) = &upper.node
}) = upper.as_ref()
{
if *i == BigInt::from(1)
&& checker.settings.rules.enabled(Rule::SysVersionSlice1)
@@ -150,7 +151,7 @@ pub(crate) fn subscript(checker: &mut Checker, value: &Expr, slice: &Expr) {
}
}
ExprKind::Constant(ast::ExprConstant {
Expr::Constant(ast::ExprConstant {
value: Constant::Int(i),
..
}) => {
@@ -173,26 +174,22 @@ pub(crate) fn subscript(checker: &mut Checker, value: &Expr, slice: &Expr) {
/// YTT103, YTT201, YTT203, YTT204, YTT302
pub(crate) fn compare(checker: &mut Checker, left: &Expr, ops: &[Cmpop], comparators: &[Expr]) {
match &left.node {
ExprKind::Subscript(ast::ExprSubscript { value, slice, .. })
match left {
Expr::Subscript(ast::ExprSubscript { value, slice, .. })
if is_sys(checker, value, "version_info") =>
{
if let ExprKind::Constant(ast::ExprConstant {
if let Expr::Constant(ast::ExprConstant {
value: Constant::Int(i),
..
}) = &slice.node
}) = slice.as_ref()
{
if *i == BigInt::from(0) {
if let (
[Cmpop::Eq | Cmpop::NotEq],
[Attributed {
node:
ExprKind::Constant(ast::ExprConstant {
value: Constant::Int(n),
..
}),
[Expr::Constant(ast::ExprConstant {
value: Constant::Int(n),
..
}],
})],
) = (ops, comparators)
{
if *n == BigInt::from(3)
@@ -206,14 +203,10 @@ pub(crate) fn compare(checker: &mut Checker, left: &Expr, ops: &[Cmpop], compara
} else if *i == BigInt::from(1) {
if let (
[Cmpop::Lt | Cmpop::LtE | Cmpop::Gt | Cmpop::GtE],
[Attributed {
node:
ExprKind::Constant(ast::ExprConstant {
value: Constant::Int(_),
..
}),
[Expr::Constant(ast::ExprConstant {
value: Constant::Int(_),
..
}],
})],
) = (ops, comparators)
{
if checker.settings.rules.enabled(Rule::SysVersionInfo1CmpInt) {
@@ -226,19 +219,15 @@ pub(crate) fn compare(checker: &mut Checker, left: &Expr, ops: &[Cmpop], compara
}
}
ExprKind::Attribute(ast::ExprAttribute { value, attr, .. })
Expr::Attribute(ast::ExprAttribute { value, attr, .. })
if is_sys(checker, value, "version_info") && attr == "minor" =>
{
if let (
[Cmpop::Lt | Cmpop::LtE | Cmpop::Gt | Cmpop::GtE],
[Attributed {
node:
ExprKind::Constant(ast::ExprConstant {
value: Constant::Int(_),
..
}),
[Expr::Constant(ast::ExprConstant {
value: Constant::Int(_),
..
}],
})],
) = (ops, comparators)
{
if checker
@@ -259,14 +248,10 @@ pub(crate) fn compare(checker: &mut Checker, left: &Expr, ops: &[Cmpop], compara
if is_sys(checker, left, "version") {
if let (
[Cmpop::Lt | Cmpop::LtE | Cmpop::Gt | Cmpop::GtE],
[Attributed {
node:
ExprKind::Constant(ast::ExprConstant {
value: Constant::Str(s),
..
}),
[Expr::Constant(ast::ExprConstant {
value: Constant::Str(s),
..
}],
})],
) = (ops, comparators)
{
if s.len() == 1 {

View File

@@ -1,5 +1,5 @@
use anyhow::{bail, Result};
use rustpython_parser::ast::Stmt;
use rustpython_parser::ast::{Ranged, Stmt};
use rustpython_parser::{lexer, Mode, Tok};
use ruff_diagnostics::Edit;

View File

@@ -1,4 +1,4 @@
use rustpython_parser::ast::{self, Arguments, Expr, Stmt, StmtKind};
use rustpython_parser::ast::{self, Arguments, Expr, Stmt};
use ruff_python_ast::cast;
use ruff_python_semantic::analyze::visibility;
@@ -6,22 +6,32 @@ use ruff_python_semantic::definition::{Definition, Member, MemberKind};
use crate::checkers::ast::Checker;
pub(super) fn match_function_def(stmt: &Stmt) -> (&str, &Arguments, Option<&Expr>, &Vec<Stmt>) {
match &stmt.node {
StmtKind::FunctionDef(ast::StmtFunctionDef {
pub(super) fn match_function_def(
stmt: &Stmt,
) -> (&str, &Arguments, Option<&Expr>, &[Stmt], &[Expr]) {
match stmt {
Stmt::FunctionDef(ast::StmtFunctionDef {
name,
args,
returns,
body,
decorator_list,
..
})
| StmtKind::AsyncFunctionDef(ast::StmtAsyncFunctionDef {
| Stmt::AsyncFunctionDef(ast::StmtAsyncFunctionDef {
name,
args,
returns,
body,
decorator_list,
..
}) => (name, args, returns.as_ref().map(|expr| &**expr), body),
}) => (
name,
args,
returns.as_ref().map(|expr| &**expr),
body,
decorator_list,
),
_ => panic!("Found non-FunctionDef in match_name"),
}
}

View File

@@ -1,4 +1,4 @@
use rustpython_parser::ast::{Expr, ExprKind, Stmt};
use rustpython_parser::ast::{Expr, Ranged, Stmt};
use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
@@ -419,8 +419,8 @@ fn is_none_returning(body: &[Stmt]) -> bool {
visitor.visit_body(body);
for expr in visitor.returns.into_iter().flatten() {
if !matches!(
expr.node,
ExprKind::Constant(ref constant) if constant.value.is_none()
expr,
Expr::Constant(ref constant) if constant.value.is_none()
) {
return false;
}
@@ -434,10 +434,11 @@ fn check_dynamically_typed<F>(
annotation: &Expr,
func: F,
diagnostics: &mut Vec<Diagnostic>,
is_overridden: bool,
) where
F: FnOnce() -> String,
{
if checker.ctx.match_typing_expr(annotation, "Any") {
if !is_overridden && checker.ctx.match_typing_expr(annotation, "Any") {
diagnostics.push(Diagnostic::new(
AnyType { name: func() },
annotation.range(),
@@ -468,7 +469,7 @@ pub(crate) fn definition(
_ => return vec![],
};
let (name, args, returns, body) = match_function_def(stmt);
let (name, args, returns, body, decorator_list) = match_function_def(stmt);
// Keep track of whether we've seen any typed arguments or return values.
let mut has_any_typed_arg = false; // Any argument has been typed?
let mut has_typed_return = false; // Return value has been typed?
@@ -478,6 +479,8 @@ pub(crate) fn definition(
// unless configured to suppress ANN* for declarations that are fully untyped.
let mut diagnostics = Vec::new();
let is_overridden = visibility::is_override(&checker.ctx, decorator_list);
// ANN001, ANN401
for arg in args
.posonlyargs
@@ -492,19 +495,20 @@ pub(crate) fn definition(
)
{
// ANN401 for dynamically typed arguments
if let Some(annotation) = &arg.node.annotation {
if let Some(annotation) = &arg.annotation {
has_any_typed_arg = true;
if checker.settings.rules.enabled(Rule::AnyType) {
check_dynamically_typed(
checker,
annotation,
|| arg.node.arg.to_string(),
|| arg.arg.to_string(),
&mut diagnostics,
is_overridden,
);
}
} else {
if !(checker.settings.flake8_annotations.suppress_dummy_args
&& checker.settings.dummy_variable_rgx.is_match(&arg.node.arg))
&& checker.settings.dummy_variable_rgx.is_match(&arg.arg))
{
if checker
.settings
@@ -513,7 +517,7 @@ pub(crate) fn definition(
{
diagnostics.push(Diagnostic::new(
MissingTypeFunctionArgument {
name: arg.node.arg.to_string(),
name: arg.arg.to_string(),
},
arg.range(),
));
@@ -524,22 +528,28 @@ pub(crate) fn definition(
// ANN002, ANN401
if let Some(arg) = &args.vararg {
if let Some(expr) = &arg.node.annotation {
if let Some(expr) = &arg.annotation {
has_any_typed_arg = true;
if !checker.settings.flake8_annotations.allow_star_arg_any {
if checker.settings.rules.enabled(Rule::AnyType) {
let name = &arg.node.arg;
check_dynamically_typed(checker, expr, || format!("*{name}"), &mut diagnostics);
let name = &arg.arg;
check_dynamically_typed(
checker,
expr,
|| format!("*{name}"),
&mut diagnostics,
is_overridden,
);
}
}
} else {
if !(checker.settings.flake8_annotations.suppress_dummy_args
&& checker.settings.dummy_variable_rgx.is_match(&arg.node.arg))
&& checker.settings.dummy_variable_rgx.is_match(&arg.arg))
{
if checker.settings.rules.enabled(Rule::MissingTypeArgs) {
diagnostics.push(Diagnostic::new(
MissingTypeArgs {
name: arg.node.arg.to_string(),
name: arg.arg.to_string(),
},
arg.range(),
));
@@ -550,27 +560,28 @@ pub(crate) fn definition(
// ANN003, ANN401
if let Some(arg) = &args.kwarg {
if let Some(expr) = &arg.node.annotation {
if let Some(expr) = &arg.annotation {
has_any_typed_arg = true;
if !checker.settings.flake8_annotations.allow_star_arg_any {
if checker.settings.rules.enabled(Rule::AnyType) {
let name = &arg.node.arg;
let name = &arg.arg;
check_dynamically_typed(
checker,
expr,
|| format!("**{name}"),
&mut diagnostics,
is_overridden,
);
}
}
} else {
if !(checker.settings.flake8_annotations.suppress_dummy_args
&& checker.settings.dummy_variable_rgx.is_match(&arg.node.arg))
&& checker.settings.dummy_variable_rgx.is_match(&arg.arg))
{
if checker.settings.rules.enabled(Rule::MissingTypeKwargs) {
diagnostics.push(Diagnostic::new(
MissingTypeKwargs {
name: arg.node.arg.to_string(),
name: arg.arg.to_string(),
},
arg.range(),
));
@@ -582,12 +593,12 @@ pub(crate) fn definition(
// ANN101, ANN102
if is_method && !visibility::is_staticmethod(&checker.ctx, cast::decorator_list(stmt)) {
if let Some(arg) = args.posonlyargs.first().or_else(|| args.args.first()) {
if arg.node.annotation.is_none() {
if arg.annotation.is_none() {
if visibility::is_classmethod(&checker.ctx, cast::decorator_list(stmt)) {
if checker.settings.rules.enabled(Rule::MissingTypeCls) {
diagnostics.push(Diagnostic::new(
MissingTypeCls {
name: arg.node.arg.to_string(),
name: arg.arg.to_string(),
},
arg.range(),
));
@@ -596,7 +607,7 @@ pub(crate) fn definition(
if checker.settings.rules.enabled(Rule::MissingTypeSelf) {
diagnostics.push(Diagnostic::new(
MissingTypeSelf {
name: arg.node.arg.to_string(),
name: arg.arg.to_string(),
},
arg.range(),
));
@@ -612,7 +623,13 @@ pub(crate) fn definition(
if let Some(expr) = &returns {
has_typed_return = true;
if checker.settings.rules.enabled(Rule::AnyType) {
check_dynamically_typed(checker, expr, || name.to_string(), &mut diagnostics);
check_dynamically_typed(
checker,
expr,
|| name.to_string(),
&mut diagnostics,
is_overridden,
);
}
} else if !(
// Allow omission of return annotation if the function only returns `None`
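The hunk above threads a new `is_overridden` flag (computed from the function's decorator list via `visibility::is_override`) into every `check_dynamically_typed` call, so ANN401 can presumably be relaxed when a method merely restates an inherited signature. Below is a minimal, hypothetical fixture sketch of what that enables, assuming a decorator literally named `override` (for example `typing.override` or the `overrides` package) is what the visibility check recognizes:

```
from typing import Any

try:
    from typing import override  # Python 3.12+
except ImportError:
    def override(func):  # stand-in so this sketch also runs on older interpreters
        return func


class Base:
    def handler(self, payload: Any) -> None:  # ANN401: `Any` in `payload` is flagged
        ...


class Child(Base):
    @override
    def handler(self, payload: Any) -> None:  # assumed exempt once `is_overridden` is honored
        ...
```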


@@ -2,10 +2,11 @@
use serde::{Deserialize, Serialize};
use ruff_macros::CacheKey;
use ruff_macros::ConfigurationOptions;
use ruff_macros::{CacheKey, CombineOptions, ConfigurationOptions};
#[derive(Debug, PartialEq, Eq, Default, Serialize, Deserialize, ConfigurationOptions)]
#[derive(
Debug, PartialEq, Eq, Default, Serialize, Deserialize, ConfigurationOptions, CombineOptions,
)]
#[serde(
deny_unknown_fields,
rename_all = "kebab-case",


@@ -1,189 +1,189 @@
---
source: crates/ruff/src/rules/flake8_annotations/mod.rs
---
annotation_presence.py:4:5: ANN201 Missing return type annotation for public function `foo`
annotation_presence.py:5:5: ANN201 Missing return type annotation for public function `foo`
|
4 | # Error
5 | def foo(a, b):
5 | # Error
6 | def foo(a, b):
| ^^^ ANN201
6 | pass
7 | pass
|
annotation_presence.py:4:9: ANN001 Missing type annotation for function argument `a`
annotation_presence.py:5:9: ANN001 Missing type annotation for function argument `a`
|
4 | # Error
5 | def foo(a, b):
5 | # Error
6 | def foo(a, b):
| ^ ANN001
6 | pass
7 | pass
|
annotation_presence.py:4:12: ANN001 Missing type annotation for function argument `b`
annotation_presence.py:5:12: ANN001 Missing type annotation for function argument `b`
|
4 | # Error
5 | def foo(a, b):
5 | # Error
6 | def foo(a, b):
| ^ ANN001
6 | pass
7 | pass
|
annotation_presence.py:9:5: ANN201 Missing return type annotation for public function `foo`
annotation_presence.py:10:5: ANN201 Missing return type annotation for public function `foo`
|
9 | # Error
10 | def foo(a: int, b):
10 | # Error
11 | def foo(a: int, b):
| ^^^ ANN201
11 | pass
12 | pass
|
annotation_presence.py:9:17: ANN001 Missing type annotation for function argument `b`
annotation_presence.py:10:17: ANN001 Missing type annotation for function argument `b`
|
9 | # Error
10 | def foo(a: int, b):
10 | # Error
11 | def foo(a: int, b):
| ^ ANN001
11 | pass
12 | pass
|
annotation_presence.py:14:17: ANN001 Missing type annotation for function argument `b`
annotation_presence.py:15:17: ANN001 Missing type annotation for function argument `b`
|
14 | # Error
15 | def foo(a: int, b) -> int:
15 | # Error
16 | def foo(a: int, b) -> int:
| ^ ANN001
16 | pass
17 | pass
|
annotation_presence.py:19:5: ANN201 Missing return type annotation for public function `foo`
annotation_presence.py:20:5: ANN201 Missing return type annotation for public function `foo`
|
19 | # Error
20 | def foo(a: int, b: int):
20 | # Error
21 | def foo(a: int, b: int):
| ^^^ ANN201
21 | pass
22 | pass
|
annotation_presence.py:24:5: ANN201 Missing return type annotation for public function `foo`
annotation_presence.py:25:5: ANN201 Missing return type annotation for public function `foo`
|
24 | # Error
25 | def foo():
25 | # Error
26 | def foo():
| ^^^ ANN201
26 | pass
27 | pass
|
annotation_presence.py:44:12: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `a`
annotation_presence.py:45:12: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `a`
|
44 | # ANN401
45 | def foo(a: Any, *args: str, **kwargs: str) -> int:
45 | # ANN401
46 | def foo(a: Any, *args: str, **kwargs: str) -> int:
| ^^^ ANN401
46 | pass
47 | pass
|
annotation_presence.py:49:47: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `foo`
annotation_presence.py:50:47: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `foo`
|
49 | # ANN401
50 | def foo(a: int, *args: str, **kwargs: str) -> Any:
50 | # ANN401
51 | def foo(a: int, *args: str, **kwargs: str) -> Any:
| ^^^ ANN401
51 | pass
52 | pass
|
annotation_presence.py:54:24: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `*args`
annotation_presence.py:55:24: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `*args`
|
54 | # ANN401
55 | def foo(a: int, *args: Any, **kwargs: Any) -> int:
55 | # ANN401
56 | def foo(a: int, *args: Any, **kwargs: Any) -> int:
| ^^^ ANN401
56 | pass
57 | pass
|
annotation_presence.py:54:39: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `**kwargs`
annotation_presence.py:55:39: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `**kwargs`
|
54 | # ANN401
55 | def foo(a: int, *args: Any, **kwargs: Any) -> int:
55 | # ANN401
56 | def foo(a: int, *args: Any, **kwargs: Any) -> int:
| ^^^ ANN401
56 | pass
57 | pass
|
annotation_presence.py:59:24: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `*args`
annotation_presence.py:60:24: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `*args`
|
59 | # ANN401
60 | def foo(a: int, *args: Any, **kwargs: str) -> int:
60 | # ANN401
61 | def foo(a: int, *args: Any, **kwargs: str) -> int:
| ^^^ ANN401
61 | pass
62 | pass
|
annotation_presence.py:64:39: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `**kwargs`
annotation_presence.py:65:39: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `**kwargs`
|
64 | # ANN401
65 | def foo(a: int, *args: str, **kwargs: Any) -> int:
65 | # ANN401
66 | def foo(a: int, *args: str, **kwargs: Any) -> int:
| ^^^ ANN401
66 | pass
67 | pass
|
annotation_presence.py:74:13: ANN101 Missing type annotation for `self` in method
annotation_presence.py:75:13: ANN101 Missing type annotation for `self` in method
|
74 | # ANN101
75 | def foo(self, a: int, b: int) -> int:
75 | # ANN101
76 | def foo(self, a: int, b: int) -> int:
| ^^^^ ANN101
76 | pass
77 | pass
|
annotation_presence.py:78:29: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `a`
annotation_presence.py:79:29: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `a`
|
78 | # ANN401
79 | def foo(self: "Foo", a: Any, *params: str, **options: str) -> int:
79 | # ANN401
80 | def foo(self: "Foo", a: Any, *params: str, **options: str) -> int:
| ^^^ ANN401
80 | pass
81 | pass
|
annotation_presence.py:82:67: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `foo`
annotation_presence.py:83:67: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `foo`
|
82 | # ANN401
83 | def foo(self: "Foo", a: int, *params: str, **options: str) -> Any:
83 | # ANN401
84 | def foo(self: "Foo", a: int, *params: str, **options: str) -> Any:
| ^^^ ANN401
84 | pass
85 | pass
|
annotation_presence.py:86:43: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `*params`
annotation_presence.py:87:43: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `*params`
|
86 | # ANN401
87 | def foo(self: "Foo", a: int, *params: Any, **options: Any) -> int:
87 | # ANN401
88 | def foo(self: "Foo", a: int, *params: Any, **options: Any) -> int:
| ^^^ ANN401
88 | pass
89 | pass
|
annotation_presence.py:86:59: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `**options`
annotation_presence.py:87:59: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `**options`
|
86 | # ANN401
87 | def foo(self: "Foo", a: int, *params: Any, **options: Any) -> int:
87 | # ANN401
88 | def foo(self: "Foo", a: int, *params: Any, **options: Any) -> int:
| ^^^ ANN401
88 | pass
89 | pass
|
annotation_presence.py:90:43: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `*params`
annotation_presence.py:91:43: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `*params`
|
90 | # ANN401
91 | def foo(self: "Foo", a: int, *params: Any, **options: str) -> int:
91 | # ANN401
92 | def foo(self: "Foo", a: int, *params: Any, **options: str) -> int:
| ^^^ ANN401
92 | pass
93 | pass
|
annotation_presence.py:94:59: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `**options`
annotation_presence.py:95:59: ANN401 Dynamically typed expressions (typing.Any) are disallowed in `**options`
|
94 | # ANN401
95 | def foo(self: "Foo", a: int, *params: str, **options: Any) -> int:
95 | # ANN401
96 | def foo(self: "Foo", a: int, *params: str, **options: Any) -> int:
| ^^^ ANN401
96 | pass
97 | pass
|
annotation_presence.py:104:13: ANN102 Missing type annotation for `cls` in classmethod
annotation_presence.py:130:13: ANN102 Missing type annotation for `cls` in classmethod
|
104 | # ANN102
105 | @classmethod
106 | def foo(cls, a: int, b: int) -> int:
130 | # ANN102
131 | @classmethod
132 | def foo(cls, a: int, b: int) -> int:
| ^^^ ANN102
107 | pass
133 | pass
|
annotation_presence.py:108:13: ANN101 Missing type annotation for `self` in method
annotation_presence.py:134:13: ANN101 Missing type annotation for `self` in method
|
108 | # ANN101
109 | def foo(self, /, a: int, b: int) -> int:
134 | # ANN101
135 | def foo(self, /, a: int, b: int) -> int:
| ^^^^ ANN101
110 | pass
136 | pass
|


@@ -0,0 +1,28 @@
//! Rules from [flake8-async](https://pypi.org/project/flake8-async/).
pub(crate) mod rules;
#[cfg(test)]
mod tests {
use std::path::Path;
use anyhow::Result;
use test_case::test_case;
use crate::assert_messages;
use crate::registry::Rule;
use crate::settings::Settings;
use crate::test::test_path;
#[test_case(Rule::BlockingHttpCallInAsyncFunction, Path::new("ASYNC100.py"); "ASYNC100")]
#[test_case(Rule::OpenSleepOrSubprocessInAsyncFunction, Path::new("ASYNC101.py"); "ASYNC101")]
#[test_case(Rule::BlockingOsCallInAsyncFunction, Path::new("ASYNC102.py"); "ASYNC102")]
fn rules(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.noqa_code(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_async").join(path).as_path(),
&Settings::for_rule(rule_code),
)?;
assert_messages!(snapshot, diagnostics);
Ok(())
}
}
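The new module above is the standard ruff test scaffold: each `test_case` pairs a rule with a fixture file and snapshots the resulting diagnostics via `assert_messages!`. The fixture contents are not part of this diff, but judging only from the rule names registered here, they presumably exercise blocking calls inside `async def` bodies along these lines (an illustrative sketch, not the actual ASYNC100.py/ASYNC101.py/ASYNC102.py):

```
import os
import subprocess
import time
import urllib.request


async def fetch(url: str) -> bytes:
    # blocking-http-call-in-async-function (ASYNC100): a synchronous HTTP request
    # inside an async function presumably gets flagged here.
    return urllib.request.urlopen(url).read()


async def slow_setup(path: str) -> None:
    # open-sleep-or-subprocess-in-async-function (ASYNC101): `open`, `time.sleep`,
    # and `subprocess` calls block the event loop.
    with open(path) as f:
        f.read()
    time.sleep(1)
    subprocess.run(["true"])


async def reap(pid: int) -> None:
    # blocking-os-call-in-async-function (ASYNC102): blocking `os.wait*`-style calls.
    os.waitpid(pid, 0)
```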

Some files were not shown because too many files have changed in this diff.