Compare commits

...

18 Commits

Author SHA1 Message Date
Charlie Marsh
cfbd068dd5 Bump version to 0.0.218 2023-01-10 21:28:23 -05:00
Charlie Marsh
8aed23fe0a Avoid B023 false-positives for some common builtins (#1776)
This is based on the upstream work in
https://github.com/PyCQA/flake8-bugbear/pull/303 and
https://github.com/PyCQA/flake8-bugbear/pull/305/files.

Resolves #1686.
2023-01-10 21:23:48 -05:00
Colin Delahunty
c016c41c71 Pyupgrade: Format specifiers (#1594)
A part of #827. Posting this for visibility. Still has some work to be
done.

Things that still need to be done before this is ready:

- [x] Does not work when the item is being assigned to a variable
- [x] Does not work if being used in a function call
- [x] Fix incorrectly removed calls in the function
- [x] Has not been tested with pyupgrade negative test cases

Tests from pyupgrade can be seen here:
https://github.com/asottile/pyupgrade/blob/main/tests/features/format_literals_test.py

Co-authored-by: Charlie Marsh <charlie.r.marsh@gmail.com>
2023-01-10 20:21:04 -05:00
Charlie Marsh
f1a5e53f06 Enable isort-style required-imports enforcement (#1762)
In isort, this is called `add-imports`, but I prefer the declarative
name.

The idea is that by adding the following to your `pyproject.toml`, you
can ensure that the import is included in all files:

```toml
[tool.ruff.isort]
required-imports = ["from __future__ import annotations"]
```

I mostly reverse-engineered isort's logic for making decisions, though I
made some slight tweaks that I think are preferable. A few comments:

- Like isort, we don't enforce this on empty files (like empty
`__init__.py`).
- Like isort, we require that the import is at the top-level.
- isort will skip any docstrings, and any comments on the first three
lines (I think, based on testing). Ruff places the import after the last
docstring or comment in the file preamble (that is: after the last
docstring or comment that comes before the _first_ non-docstring and
non-comment).

Resolves #1700.
2023-01-10 18:12:57 -05:00
Charlie Marsh
1e94e0221f Disable doctests (#1772)
We don't have any doctests, but `cargo test --all` spends more than half
the time on doctests? A little confusing, but this brings the test time
from > 4s to < 2s on my machine.
2023-01-10 15:10:16 -05:00
Martin Fischer
543865c96b Generate RuleCode::origin() via macro (#1770) 2023-01-10 13:20:43 -05:00
Maksudul Haque
b8e3f0bc13 [flake8-bandit] Add Rule for S508 (snmp insecure version) & S509 (snmp weak cryptography) (#1771)
ref: https://github.com/charliermarsh/ruff/issues/1646

Co-authored-by: messense <messense@icloud.com>
Co-authored-by: Charlie Marsh <charlie.r.marsh@gmail.com>
2023-01-10 13:13:54 -05:00
Charlie Marsh
643cedb200 Move CONTRIBUTING.md to top-level (#1768) 2023-01-10 07:38:12 -05:00
Charlie Marsh
91620c378a Disable release builds on CI (#1761) 2023-01-10 07:33:03 -05:00
Harutaka Kawamura
b732135795 Do not autofix PT004 and PT005 (#1763)
As @edgarrmondragon commented in
https://github.com/charliermarsh/ruff/pull/1740#issuecomment-1376230550,
just renaming the fixture doesn't work.
2023-01-10 07:24:16 -05:00
messense
9384a081f9 Implement flake8-simplify SIM112 (#1764)
Ref #998
2023-01-10 07:24:01 -05:00
Charlie Marsh
edab268d50 Bump version to 0.0.217 2023-01-09 23:26:22 -05:00
Charlie Marsh
e4fad70a57 Update documentation to match latest terminology (#1760)
Closes #1759.
2023-01-09 21:10:47 -05:00
Charlie Marsh
1a09fff991 Update rule-generation scripts to match latest conventions (#1758)
Resolves #1755.
2023-01-09 19:55:46 -05:00
Charlie Marsh
b85105d2ec Add a helper for any-like operations (#1757) 2023-01-09 19:34:33 -05:00
Charlie Marsh
f7ac28a935 Omit sys.version_info and sys.platform checks from ternary rule (#1756)
Resolves #1753.
2023-01-09 19:22:34 -05:00
Charlie Marsh
9532f342a6 Enable project-specific typing module re-exports (#1754)
Resolves #1744.
2023-01-09 18:17:50 -05:00
Mohamed Daahir
0ee37aa0aa Cache build artifacts using Swatinem/rust-cache@v1 (#1750)
This GitHub Action caches build artifacts in addition to dependencies,
which halves the CI duration.

Resolves #1752.
2023-01-09 15:35:32 -05:00
105 changed files with 2950 additions and 1306 deletions

View File

@@ -26,21 +26,9 @@ jobs:
profile: minimal
toolchain: nightly-2022-11-01
override: true
components: rustfmt
- uses: actions/cache@v3
env:
cache-name: cache-cargo
with:
path: |
~/.cargo/registry
~/.cargo/git
key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-build-${{ env.cache-name }}-
${{ runner.os }}-build-
${{ runner.os }}-
- run: cargo build --all --release
- run: ./target/release/ruff_dev generate-all
- uses: Swatinem/rust-cache@v1
- run: cargo build --all
- run: ./target/debug/ruff_dev generate-all
- run: git diff --quiet README.md || echo "::error file=README.md::This file is outdated. Run 'cargo +nightly dev generate-all'."
- run: git diff --quiet ruff.schema.json || echo "::error file=ruff.schema.json::This file is outdated. Run 'cargo +nightly dev generate-all'."
- run: git diff --exit-code -- README.md ruff.schema.json
@@ -56,18 +44,6 @@ jobs:
toolchain: nightly-2022-11-01
override: true
components: rustfmt
- uses: actions/cache@v3
env:
cache-name: cache-cargo
with:
path: |
~/.cargo/registry
~/.cargo/git
key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-build-${{ env.cache-name }}-
${{ runner.os }}-build-
${{ runner.os }}-
- run: cargo fmt --all --check
cargo_clippy:
@@ -82,18 +58,7 @@ jobs:
override: true
components: clippy
target: wasm32-unknown-unknown
- uses: actions/cache@v3
env:
cache-name: cache-cargo
with:
path: |
~/.cargo/registry
~/.cargo/git
key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-build-${{ env.cache-name }}-
${{ runner.os }}-build-
${{ runner.os }}-
- uses: Swatinem/rust-cache@v1
- run: cargo clippy --workspace --all-targets --all-features -- -D warnings -W clippy::pedantic
- run: cargo clippy --workspace --target wasm32-unknown-unknown --all-features -- -D warnings -W clippy::pedantic
@@ -107,18 +72,7 @@ jobs:
profile: minimal
toolchain: nightly-2022-11-01
override: true
- uses: actions/cache@v3
env:
cache-name: cache-cargo
with:
path: |
~/.cargo/registry
~/.cargo/git
key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-build-${{ env.cache-name }}-
${{ runner.os }}-build-
${{ runner.os }}-
- uses: Swatinem/rust-cache@v1
- run: cargo install cargo-insta
- run: pip install black[d]==22.12.0
- name: Run tests
@@ -167,22 +121,11 @@ jobs:
profile: minimal
toolchain: nightly-2022-11-01
override: true
- uses: Swatinem/rust-cache@v1
- uses: actions/setup-python@v4
with:
python-version: "3.11"
- run: pip install maturin
- uses: actions/cache@v3
env:
cache-name: cache-cargo
with:
path: |
~/.cargo/registry
~/.cargo/git
key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-build-${{ env.cache-name }}-
${{ runner.os }}-build-
${{ runner.os }}-
- run: maturin build -b bin
typos:

View File

@@ -1,6 +1,6 @@
repos:
- repo: https://github.com/charliermarsh/ruff-pre-commit
rev: v0.0.216
rev: v0.0.218
hooks:
- id: ruff

View File

@@ -56,9 +56,9 @@ prior to merging.
There are four phases to adding a new lint rule:
1. Define the violation in `src/violations.rs` (e.g., `ModuleImportNotAtTopOfFile`).
2. Map the violation to a code in `src/registry.rs` (e.g., `E402`).
3. Define the _logic_ for triggering the violation in `src/checkers/ast.rs` (for AST-based checks),
1. Define the violation struct in `src/violations.rs` (e.g., `ModuleImportNotAtTopOfFile`).
2. Map the violation struct to a rule code in `src/registry.rs` (e.g., `E402`).
3. Define the logic for triggering the violation in `src/checkers/ast.rs` (for AST-based checks),
`src/checkers/tokens.rs` (for token-based checks), or `src/checkers/lines.rs` (for text-based checks).
4. Add a test fixture.
5. Update the generated files (documentation and generated code).
@@ -74,15 +74,16 @@ collecting diagnostics as it goes.
If you need to inspect the AST, you can run `cargo +nightly dev print-ast` with a Python file. Grep
for the `Check::new` invocations to understand how other, similar rules are implemented.
To add a test fixture, create a file under `resources/test/fixtures/[plugin-name]`, named to match
the code you defined earlier (e.g., `E402.py`). This file should contain a variety of
violations and non-violations designed to evaluate and demonstrate the behavior of your lint rule.
To add a test fixture, create a file under `resources/test/fixtures/[origin]`, named to match
the code you defined earlier (e.g., `resources/test/fixtures/pycodestyle/E402.py`). This file should
contain a variety of violations and non-violations designed to evaluate and demonstrate the behavior
of your lint rule.
Run `cargo +nightly dev generate-all` to generate the code for your new fixture. Then run Ruff
locally with (e.g.) `cargo run resources/test/fixtures/pycodestyle/E402.py --no-cache --select E402`.
Once you're satisfied with the output, codify the behavior as a snapshot test by adding a new
`test_case` macro in the relevant `src/[plugin-name]/mod.rs` file. Then, run `cargo test --all`.
`test_case` macro in the relevant `src/[origin]/mod.rs` file. Then, run `cargo test --all`.
Your test will fail, but you'll be prompted to follow-up with `cargo insta review`. Accept the
generated snapshot, then commit the snapshot file alongside the rest of your changes.

8
Cargo.lock generated
View File

@@ -735,7 +735,7 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
[[package]]
name = "flake8-to-ruff"
version = "0.0.216-dev.0"
version = "0.0.218-dev.0"
dependencies = [
"anyhow",
"clap 4.0.32",
@@ -1874,7 +1874,7 @@ dependencies = [
[[package]]
name = "ruff"
version = "0.0.216"
version = "0.0.218"
dependencies = [
"annotate-snippets 0.9.1",
"anyhow",
@@ -1942,7 +1942,7 @@ dependencies = [
[[package]]
name = "ruff_dev"
version = "0.0.216"
version = "0.0.218"
dependencies = [
"anyhow",
"clap 4.0.32",
@@ -1962,7 +1962,7 @@ dependencies = [
[[package]]
name = "ruff_macros"
version = "0.0.216"
version = "0.0.218"
dependencies = [
"once_cell",
"proc-macro2",

View File

@@ -6,7 +6,7 @@ members = [
[package]
name = "ruff"
version = "0.0.216"
version = "0.0.218"
authors = ["Charlie Marsh <charlie.r.marsh@gmail.com>"]
edition = "2021"
rust-version = "1.65.0"
@@ -19,6 +19,7 @@ license = "MIT"
[lib]
name = "ruff"
crate-type = ["cdylib", "rlib"]
doctest = false
[dependencies]
annotate-snippets = { version = "0.9.1", features = ["color"] }
@@ -51,7 +52,7 @@ path-absolutize = { version = "3.0.14", features = ["once_cell_cache", "use_unix
quick-junit = { version = "0.3.2" }
regex = { version = "1.6.0" }
ropey = { version = "1.5.0", features = ["cr_lines", "simd"], default-features = false }
ruff_macros = { version = "0.0.216", path = "ruff_macros" }
ruff_macros = { version = "0.0.218", path = "ruff_macros" }
rustc-hash = { version = "1.1.0" }
rustpython-ast = { features = ["unparse"], git = "https://github.com/RustPython/RustPython.git", rev = "d532160333ffeb6dbeca2c2728c2391cd1e53b7f" }
rustpython-common = { git = "https://github.com/RustPython/RustPython.git", rev = "d532160333ffeb6dbeca2c2728c2391cd1e53b7f" }

216
README.md
View File

@@ -164,9 +164,9 @@ pacman -S ruff
To run Ruff, try any of the following:
```shell
ruff path/to/code/to/check.py # Run Ruff over `check.py`
ruff path/to/code/ # Run Ruff over all files in `/path/to/code` (and any subdirectories)
ruff path/to/code/*.py # Run Ruff over all `.py` files in `/path/to/code`
ruff path/to/code/to/lint.py # Run Ruff over `lint.py`
ruff path/to/code/ # Run Ruff over all files in `/path/to/code` (and any subdirectories)
ruff path/to/code/*.py # Run Ruff over all `.py` files in `/path/to/code`
```
You can run Ruff in `--watch` mode to automatically re-run on-change:
@@ -180,7 +180,7 @@ Ruff also works with [pre-commit](https://pre-commit.com):
```yaml
- repo: https://github.com/charliermarsh/ruff-pre-commit
# Ruff version.
rev: 'v0.0.216'
rev: 'v0.0.218'
hooks:
- id: ruff
# Respect `exclude` and `extend-exclude` settings.
@@ -237,9 +237,9 @@ target-version = "py310"
max-complexity = 10
```
As an example, the following would configure Ruff to: (1) avoid checking for line-length
violations (`E501`); (2) never remove unused imports (`F401`); and (3) ignore import-at-top-of-file
errors (`E402`) in `__init__.py` files:
As an example, the following would configure Ruff to: (1) avoid enforcing line-length violations
(`E501`); (2) never remove unused imports (`F401`); and (3) ignore import-at-top-of-file violations
(`E402`) in `__init__.py` files:
```toml
[tool.ruff]
@@ -269,16 +269,16 @@ select = ["E", "F", "Q"]
docstring-quotes = "double"
```
Ruff mirrors Flake8's error code system, in which each error code consists of a one-to-three letter
prefix, followed by three digits (e.g., `F401`). The prefix indicates that "source" of the error
code (e.g., `F` for Pyflakes, `E` for `pycodestyle`, `ANN` for `flake8-annotations`). The set of
enabled errors is determined by the `select` and `ignore` options, which support both the full
error code (e.g., `F401`) and the prefix (e.g., `F`).
Ruff mirrors Flake8's rule code system, in which each rule code consists of a one-to-three letter
prefix, followed by three digits (e.g., `F401`). The prefix indicates the "source" of the rule
(e.g., `F` for Pyflakes, `E` for `pycodestyle`, `ANN` for `flake8-annotations`). The set of enabled
rules is determined by the `select` and `ignore` options, which support both the full code (e.g.,
`F401`) and the prefix (e.g., `F`).
As a special-case, Ruff also supports the `ALL` error code, which enables all error codes. Note that
some of the `pydocstyle` error codes are conflicting (e.g., `D203` and `D211`) as they represent
alternative docstring formats. Enabling `ALL` without further configuration may result in suboptimal
behavior, especially for the `pydocstyle` plugin.
As a special-case, Ruff also supports the `ALL` code, which enables all rules. Note that some of the
`pydocstyle` rules conflict (e.g., `D203` and `D211`) as they represent alternative docstring
formats. Enabling `ALL` without further configuration may result in suboptimal behavior, especially
for the `pydocstyle` plugin.
As an alternative to `pyproject.toml`, Ruff will also respect a `ruff.toml` file, which implements
an equivalent schema (though the `[tool.ruff]` hierarchy can be omitted). For example, the
@@ -326,17 +326,17 @@ Options:
-v, --verbose
Enable verbose logging
-q, --quiet
Only log errors
Print lint violations, but nothing else
-s, --silent
Disable all logging (but still exit with status code "1" upon detecting errors)
Disable all logging (but still exit with status code "1" upon detecting lint violations)
-e, --exit-zero
Exit with status code "0", even upon detecting errors
Exit with status code "0", even upon detecting lint violations
-w, --watch
Run in watch mode by re-running whenever files change
--fix
Attempt to automatically fix lint errors
Attempt to automatically fix lint violations
--fix-only
Fix any fixable lint errors, but don't report on leftover violations. Implies `--fix`
Fix any fixable lint violations, but don't report on leftover violations. Implies `--fix`
--diff
Avoid writing any fixed files back; instead, output a diff for each changed file to stdout
-n, --no-cache
@@ -346,23 +346,23 @@ Options:
--select <SELECT>
Comma-separated list of rule codes to enable (or ALL, to enable all rules)
--extend-select <EXTEND_SELECT>
Like --select, but adds additional error codes on top of the selected ones
Like --select, but adds additional rule codes on top of the selected ones
--ignore <IGNORE>
Comma-separated list of error codes to disable
Comma-separated list of rule codes to disable
--extend-ignore <EXTEND_IGNORE>
Like --ignore, but adds additional error codes on top of the ignored ones
Like --ignore, but adds additional rule codes on top of the ignored ones
--exclude <EXCLUDE>
List of paths, used to exclude files and/or directories from checks
List of paths, used to omit files and/or directories from analysis
--extend-exclude <EXTEND_EXCLUDE>
Like --exclude, but adds additional files and directories on top of the excluded ones
Like --exclude, but adds additional files and directories on top of those already excluded
--fixable <FIXABLE>
List of error codes to treat as eligible for autofix. Only applicable when autofix itself is enabled (e.g., via `--fix`)
List of rule codes to treat as eligible for autofix. Only applicable when autofix itself is enabled (e.g., via `--fix`)
--unfixable <UNFIXABLE>
List of error codes to treat as ineligible for autofix. Only applicable when autofix itself is enabled (e.g., via `--fix`)
List of rule codes to treat as ineligible for autofix. Only applicable when autofix itself is enabled (e.g., via `--fix`)
--per-file-ignores <PER_FILE_IGNORES>
List of mappings from file pattern to code to exclude
--format <FORMAT>
Output serialization format for error messages [env: RUFF_FORMAT=] [possible values: text, json, junit, grouped, github, gitlab]
Output serialization format for violations [env: RUFF_FORMAT=] [possible values: text, json, junit, grouped, github, gitlab]
--stdin-filename <STDIN_FILENAME>
The name of the file when passing it through stdin
--cache-dir <CACHE_DIR>
@@ -380,7 +380,7 @@ Options:
--target-version <TARGET_VERSION>
The minimum Python version that should be supported
--line-length <LINE_LENGTH>
Set the line-length for length-associated checks and automatic formatting
Set the line-length for length-associated rules and automatic formatting
--max-complexity <MAX_COMPLEXITY>
Maximum McCabe complexity allowed for a given function
--add-noqa
@@ -392,7 +392,7 @@ Options:
--show-files
See the files Ruff will be run against with the current settings
--show-settings
See the settings Ruff will use to check a given Python file
See the settings Ruff will use to lint a given Python file
-h, --help
Print help information
-V, --version
@@ -449,16 +449,16 @@ in each directory's `pyproject.toml` file.
By default, Ruff will also skip any files that are omitted via `.ignore`, `.gitignore`,
`.git/info/exclude`, and global `gitignore` files (see: [`respect-gitignore`](#respect-gitignore)).
Files that are passed to `ruff` directly are always checked, regardless of the above criteria.
For example, `ruff /path/to/excluded/file.py` will always check `file.py`.
Files that are passed to `ruff` directly are always linted, regardless of the above criteria.
For example, `ruff /path/to/excluded/file.py` will always lint `file.py`.
### Ignoring errors
To omit a lint check entirely, add it to the "ignore" list via [`ignore`](#ignore) or
To omit a lint rule entirely, add it to the "ignore" list via [`ignore`](#ignore) or
[`extend-ignore`](#extend-ignore), either on the command-line or in your `pyproject.toml` file.
To ignore an error inline, Ruff uses a `noqa` system similar to [Flake8](https://flake8.pycqa.org/en/3.1.1/user/ignoring-errors.html).
To ignore an individual error, add `# noqa: {code}` to the end of the line, like so:
To ignore a violation inline, Ruff uses a `noqa` system similar to [Flake8](https://flake8.pycqa.org/en/3.1.1/user/ignoring-errors.html).
To ignore an individual violation, add `# noqa: {code}` to the end of the line, like so:
```python
# Ignore F841.
@@ -467,7 +467,7 @@ x = 1 # noqa: F841
# Ignore E741 and F841.
i = 1 # noqa: E741, F841
# Ignore _all_ errors.
# Ignore _all_ violations.
x = 1 # noqa
```
@@ -481,9 +481,9 @@ Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor i
""" # noqa: E501
```
To ignore all errors across an entire file, Ruff supports Flake8's `# flake8: noqa` directive (or,
equivalently, `# ruff: noqa`). Adding either of those directives to any part of a file will disable
error reporting for the entire file.
To ignore all violations across an entire file, Ruff supports Flake8's `# flake8: noqa` directive
(or, equivalently, `# ruff: noqa`). Adding either of those directives to any part of a file will
disable enforcement across the entire file.
For targeted exclusions across entire files (e.g., "Ignore all F841 violations in
`/path/to/file.py`"), see the [`per-file-ignores`](#per-file-ignores) configuration setting.
@@ -502,8 +502,8 @@ for more.
Ruff supports several workflows to aid in `noqa` management.
First, Ruff provides a special error code, `RUF100`, to enforce that your `noqa` directives are
"valid", in that the errors they _say_ they ignore are actually being triggered on that line (and
First, Ruff provides a special rule code, `RUF100`, to enforce that your `noqa` directives are
"valid", in that the violations they _say_ they ignore are actually being triggered on that line (and
thus suppressed). You can run `ruff /path/to/file.py --extend-select RUF100` to flag unused `noqa`
directives.
@@ -513,13 +513,13 @@ You can run `ruff /path/to/file.py --extend-select RUF100 --fix` to automaticall
Third, Ruff can _automatically add_ `noqa` directives to all failing lines. This is useful when
migrating a new codebase to Ruff. You can run `ruff /path/to/file.py --add-noqa` to automatically
add `noqa` directives to all failing lines, with the appropriate error codes.
add `noqa` directives to all failing lines, with the appropriate rule codes.
## Supported Rules
Regardless of the rule's origin, Ruff re-implements every rule in Rust as a first-party feature.
By default, Ruff enables all `E` and `F` error codes, which correspond to those built-in to Flake8.
By default, Ruff enables all `E` and `F` rule codes, which correspond to those built-in to Flake8.
The 🛠 emoji indicates that a rule is automatically fixable by the `--fix` command-line option.
@@ -614,6 +614,7 @@ For more, see [isort](https://pypi.org/project/isort/5.10.1/) on PyPI.
| Code | Name | Message | Fix |
| ---- | ---- | ------- | --- |
| I001 | UnsortedImports | Import block is un-sorted or un-formatted | 🛠 |
| I002 | MissingRequiredImport | Missing required import: `from __future__ import ...` | 🛠 |
### pydocstyle (D)
@@ -701,6 +702,7 @@ For more, see [pyupgrade](https://pypi.org/project/pyupgrade/3.2.0/) on PyPI.
| UP027 | RewriteListComprehension | Replace unpacked list comprehension with a generator expression | 🛠 |
| UP028 | RewriteYieldFrom | Replace `yield` over `for` loop with `yield from` | 🛠 |
| UP029 | UnnecessaryBuiltinImport | Unnecessary builtin import: `...` | 🛠 |
| UP030 | FormatLiterals | Use implicit references for positional format fields | 🛠 |
### pep8-naming (N)
@@ -777,6 +779,8 @@ For more, see [flake8-bandit](https://pypi.org/project/flake8-bandit/4.1.1/) on
| S324 | HashlibInsecureHashFunction | Probable use of insecure hash functions in `hashlib`: "..." | |
| S501 | RequestWithNoCertValidation | Probable use of `...` call with `verify=False` disabling SSL certificate checks | |
| S506 | UnsafeYAMLLoad | Probable use of unsafe `yaml.load`. Allows instantiation of arbitrary objects. Consider `yaml.safe_load`. | |
| S508 | SnmpInsecureVersion | The use of SNMPv1 and SNMPv2 is insecure. Use SNMPv3 if able. | |
| S509 | SnmpWeakCryptography | You should not use SNMPv3 without encryption. `noAuthNoPriv` & `authNoPriv` is insecure. | |
### flake8-blind-except (BLE)
@@ -980,6 +984,7 @@ For more, see [flake8-simplify](https://pypi.org/project/flake8-simplify/0.19.3/
| SIM109 | CompareWithTuple | Use `value in (..., ...)` instead of `value == ... or value == ...` | 🛠 |
| SIM110 | ConvertLoopToAny | Use `return any(x for x in y)` instead of `for` loop | 🛠 |
| SIM111 | ConvertLoopToAll | Use `return all(x for x in y)` instead of `for` loop | 🛠 |
| SIM112 | UseCapitalEnvironmentVariables | Use capitalized environment variable `...` instead of `...` | 🛠 |
| SIM117 | MultipleWithStatements | Use a single `with` statement with multiple contexts instead of nested `with` statements | |
| SIM118 | KeyInDict | Use `key in dict` instead of `key in dict.keys()` | 🛠 |
| SIM201 | NegateEqualOp | Use `left != right` instead of `not left == right` | 🛠 |
@@ -1066,8 +1071,8 @@ For more, see [pygrep-hooks](https://github.com/pre-commit/pygrep-hooks) on GitH
| ---- | ---- | ------- | --- |
| PGH001 | NoEval | No builtin `eval()` allowed | |
| PGH002 | DeprecatedLogWarn | `warn` is deprecated in favor of `warning` | |
| PGH003 | BlanketTypeIgnore | Use specific error codes when ignoring type issues | |
| PGH004 | BlanketNOQA | Use specific error codes when using `noqa` | |
| PGH003 | BlanketTypeIgnore | Use specific rule codes when ignoring type issues | |
| PGH004 | BlanketNOQA | Use specific rule codes when using `noqa` | |
### Pylint (PLC, PLE, PLR, PLW)
@@ -1393,7 +1398,7 @@ natively, including:
- [`pyupgrade`](https://pypi.org/project/pyupgrade/) ([#827](https://github.com/charliermarsh/ruff/issues/827))
- [`yesqa`](https://github.com/asottile/yesqa)
Note that, in some cases, Ruff uses different error code prefixes than would be found in the
Note that, in some cases, Ruff uses different rule codes and prefixes than would be found in the
originating Flake8 plugins. For example, Ruff uses `TID252` to represent the `I252` rule from
`flake8-tidy-imports`. This helps minimize conflicts across plugins and allows any individual plugin
to be toggled on or off with a single (e.g.) `--select TID`, as opposed to `--select I2` (to avoid
@@ -1418,9 +1423,9 @@ At time of writing, Pylint implements 409 total rules, while Ruff implements 224
at least 60 overlap with the Pylint rule set. Subjectively, Pylint tends to implement more rules
based on type inference (e.g., validating the number of arguments in a function call).
Like Flake8, Pylint supports plugins (called "checkers"), while Ruff implements all checks natively.
Like Flake8, Pylint supports plugins (called "checkers"), while Ruff implements all rules natively.
Unlike Pylint, Ruff is capable of automatically fixing its own lint errors.
Unlike Pylint, Ruff is capable of automatically fixing its own lint violations.
Pylint parity is being tracked in [#689](https://github.com/charliermarsh/ruff/issues/689).
@@ -1533,7 +1538,7 @@ For example, if you're coming from `flake8-docstrings`, and your originating con
`--docstring-convention=numpy`, you'd instead set `convention = "numpy"` in your `pyproject.toml`,
as above.
Alongside `convention`, you'll want to explicitly enable the `D` error code class, like so:
Alongside `convention`, you'll want to explicitly enable the `D` rule code prefix, like so:
```toml
[tool.ruff]
@@ -1786,7 +1791,7 @@ cache-dir = "~/.cache/ruff"
#### [`dummy-variable-rgx`](#dummy-variable-rgx)
A regular expression used to identify "dummy" variables, or those which
should be ignored when evaluating (e.g.) unused-variable checks. The
should be ignored when enforcing (e.g.) unused-variable rules. The
default expression matches `_`, `__`, and `_var`, but not `_var_`.
**Default value**: `"^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"`
@@ -1880,8 +1885,8 @@ extend-exclude = ["tests", "src/bad.py"]
#### [`extend-ignore`](#extend-ignore)
A list of check code prefixes to ignore, in addition to those specified
by `ignore`.
A list of rule codes or prefixes to ignore, in addition to those
specified by `ignore`.
**Default value**: `[]`
@@ -1891,7 +1896,7 @@ by `ignore`.
```toml
[tool.ruff]
# Skip unused variable checks (`F841`).
# Skip unused variable rules (`F841`).
extend-ignore = ["F841"]
```
@@ -1899,8 +1904,8 @@ extend-ignore = ["F841"]
#### [`extend-select`](#extend-select)
A list of check code prefixes to enable, in addition to those specified
by `select`.
A list of rule codes or prefixes to enable, in addition to those
specified by `select`.
**Default value**: `[]`
@@ -1918,7 +1923,7 @@ extend-select = ["B", "Q"]
#### [`external`](#external)
A list of check codes that are unsupported by Ruff, but should be
A list of rule codes that are unsupported by Ruff, but should be
preserved when (e.g.) validating `# noqa` directives. Useful for
retaining `# noqa` directives that cover plugins not yet implemented
by Ruff.
@@ -1975,7 +1980,7 @@ fix-only = true
#### [`fixable`](#fixable)
A list of check code prefixes to consider autofix-able.
A list of rule codes or prefixes to consider autofixable.
**Default value**: `["A", "ANN", "ARG", "B", "BLE", "C", "D", "E", "ERA", "F", "FBT", "I", "ICN", "N", "PGH", "PLC", "PLE", "PLR", "PLW", "Q", "RET", "RUF", "S", "T", "TID", "UP", "W", "YTT"]`
@@ -1985,7 +1990,7 @@ A list of check code prefixes to consider autofix-able.
```toml
[tool.ruff]
# Only allow autofix behavior for `E` and `F` checks.
# Only allow autofix behavior for `E` and `F` rules.
fixable = ["E", "F"]
```
@@ -2041,11 +2046,11 @@ format = "grouped"
#### [`ignore`](#ignore)
A list of check code prefixes to ignore. Prefixes can specify exact
checks (like `F841`), entire categories (like `F`), or anything in
A list of rule codes or prefixes to ignore. Prefixes can specify exact
rules (like `F841`), entire categories (like `F`), or anything in
between.
When breaking ties between enabled and disabled checks (via `select` and
When breaking ties between enabled and disabled rules (via `select` and
`ignore`, respectively), more specific prefixes override less
specific prefixes.
@@ -2057,7 +2062,7 @@ specific prefixes.
```toml
[tool.ruff]
# Skip unused variable checks (`F841`).
# Skip unused variable rules (`F841`).
ignore = ["F841"]
```
@@ -2105,8 +2110,8 @@ line-length = 120
#### [`per-file-ignores`](#per-file-ignores)
A list of mappings from file pattern to check code prefixes to exclude,
when considering any matching files.
A list of mappings from file pattern to rule codes or prefixes to
exclude, when considering any matching files.
**Default value**: `{}`
@@ -2164,11 +2169,11 @@ respect_gitignore = false
#### [`select`](#select)
A list of check code prefixes to enable. Prefixes can specify exact
checks (like `F841`), entire categories (like `F`), or anything in
A list of rule codes or prefixes to enable. Prefixes can specify exact
rules (like `F841`), entire categories (like `F`), or anything in
between.
When breaking ties between enabled and disabled checks (via `select` and
When breaking ties between enabled and disabled rules (via `select` and
`ignore`, respectively), more specific prefixes override less
specific prefixes.
@@ -2188,8 +2193,8 @@ select = ["E", "F", "B", "Q"]
#### [`show-source`](#show-source)
Whether to show source code snippets when reporting lint error
violations (overridden by the `--show-source` command-line flag).
Whether to show source code snippets when reporting lint violations
(overridden by the `--show-source` command-line flag).
**Default value**: `false`
@@ -2272,7 +2277,7 @@ target-version = "py37"
A list of task tags to recognize (e.g., "TODO", "FIXME", "XXX").
Comments starting with these tags will be ignored by commented-out code
detection (`ERA`), and skipped by line-length checks (`E501`) if
detection (`ERA`), and skipped by line-length rules (`E501`) if
`ignore-overlong-task-comments` is set to `true`.
**Default value**: `["TODO", "FIXME", "XXX"]`
@@ -2288,9 +2293,33 @@ task-tags = ["HACK"]
---
#### [`typing-modules`](#typing-modules)
A list of modules whose imports should be treated equivalently to
members of the `typing` module.
This is useful for ensuring proper type annotation inference for
projects that re-export `typing` and `typing_extensions` members
from a compatibility module. If omitted, any members imported from
modules apart from `typing` and `typing_extensions` will be treated
as ordinary Python objects.
**Default value**: `[]`
**Type**: `Vec<String>`
**Example usage**:
```toml
[tool.ruff]
typing-modules = ["airflow.typing_compat"]
```
---
#### [`unfixable`](#unfixable)
A list of check code prefixes to consider un-autofix-able.
A list of rule codes or prefixes to consider non-autofix-able.
**Default value**: `[]`
@@ -2364,7 +2393,7 @@ mypy-init-return = true
#### [`suppress-dummy-args`](#suppress-dummy-args)
Whether to suppress `ANN000`-level errors for arguments matching the
Whether to suppress `ANN000`-level violations for arguments matching the
"dummy" variable regex (like `_`).
**Default value**: `false`
@@ -2382,8 +2411,8 @@ suppress-dummy-args = true
#### [`suppress-none-returning`](#suppress-none-returning)
Whether to suppress `ANN200`-level errors for functions that meet either
of the following criteria:
Whether to suppress `ANN200`-level violations for functions that meet
either of the following criteria:
- Contain no `return` statement.
- Explicit `return` statement(s) all return `None` (explicitly or
@@ -2444,7 +2473,7 @@ extend-hardcoded-tmp-directory = ["/foo/bar"]
#### [`extend-immutable-calls`](#extend-immutable-calls)
Additional callable functions to consider "immutable" when evaluating,
e.g., `no-mutable-default-argument` checks (`B006`).
e.g., the `no-mutable-default-argument` rule (`B006`).
**Default value**: `[]`
@@ -2531,9 +2560,9 @@ will be added to the `aliases` mapping.
Boolean flag specifying whether `@pytest.fixture()` without parameters
should have parentheses. If the option is set to `true` (the
default), `@pytest.fixture()` is valid and `@pytest.fixture` is an
error. If set to `false`, `@pytest.fixture` is valid and
`@pytest.fixture()` is an error.
default), `@pytest.fixture()` is valid and `@pytest.fixture` is
invalid. If set to `false`, `@pytest.fixture` is valid and
`@pytest.fixture()` is invalid.
**Default value**: `true`
@@ -2552,9 +2581,9 @@ fixture-parentheses = true
Boolean flag specifying whether `@pytest.mark.foo()` without parameters
should have parentheses. If the option is set to `true` (the
default), `@pytest.mark.foo()` is valid and `@pytest.mark.foo` is an
error. If set to `false`, `@pytest.fixture` is valid and
`@pytest.mark.foo()` is an error.
default), `@pytest.mark.foo()` is valid and `@pytest.mark.foo` is
invalid. If set to `false`, `@pytest.fixture` is valid and
`@pytest.mark.foo()` is invalid.
**Default value**: `true`
@@ -2776,7 +2805,7 @@ ban-relative-imports = "all"
#### [`banned-api`](#banned-api)
Specific modules or module members that may not be imported or accessed.
Note that this check is only meant to flag accidental uses,
Note that this rule is only meant to flag accidental uses,
and can be circumvented via `eval` or `importlib`.
**Default value**: `{}`
@@ -2974,6 +3003,23 @@ order-by-type = true
---
#### [`required-imports`](#required-imports)
Add the specified import line to all files.
**Default value**: `[]`
**Type**: `Vec<String>`
**Example usage**:
```toml
[tool.ruff.isort]
add-import = ["from __future__ import annotations"]
```
---
#### [`single-line-exclusions`](#single-line-exclusions)
One or more modules to exclude from the single line rule.
@@ -3096,7 +3142,7 @@ staticmethod-decorators = ["staticmethod", "stcmthd"]
#### [`ignore-overlong-task-comments`](#ignore-overlong-task-comments)
Whether or not line-length checks (`E501`) should be triggered for
Whether or not line-length violations (`E501`) should be triggered for
comments starting with `task-tags` (by default: ["TODO", "FIXME",
and "XXX"]).
@@ -3167,4 +3213,4 @@ MIT
## Contributing
Contributions are welcome and hugely appreciated. To get started, check out the
[contributing guidelines](https://github.com/charliermarsh/ruff/blob/main/.github/CONTRIBUTING.md).
[contributing guidelines](https://github.com/charliermarsh/ruff/blob/main/CONTRIBUTING.md).

View File

@@ -771,7 +771,7 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
[[package]]
name = "flake8_to_ruff"
version = "0.0.216"
version = "0.0.218"
dependencies = [
"anyhow",
"clap",
@@ -1975,7 +1975,7 @@ dependencies = [
[[package]]
name = "ruff"
version = "0.0.216"
version = "0.0.218"
dependencies = [
"anyhow",
"bincode",

View File

@@ -1,10 +1,11 @@
[package]
name = "flake8-to-ruff"
version = "0.0.216-dev.0"
version = "0.0.218-dev.0"
edition = "2021"
[lib]
name = "flake8_to_ruff"
doctest = false
[dependencies]
anyhow = { version = "1.0.66" }

View File

@@ -84,7 +84,7 @@ flake8-to-ruff path/to/.flake8 --plugin flake8-builtins --plugin flake8-quotes
1. Ruff only supports a subset of the Flake configuration options. `flake8-to-ruff` will warn on and
ignore unsupported options in the `.flake8` file (or equivalent). (Similarly, Ruff has a few
configuration options that don't exist in Flake8.)
2. Ruff will omit any error codes that are unimplemented or unsupported by Ruff, including error
2. Ruff will omit any rule codes that are unimplemented or unsupported by Ruff, including rule
codes from unsupported plugins. (See the [Ruff README](https://github.com/charliermarsh/ruff#user-content-how-does-ruff-compare-to-flake8)
for the complete list of supported plugins.)

View File

@@ -30,7 +30,7 @@ pub fn convert(
.get("flake8")
.expect("Unable to find flake8 section in INI file");
// Extract all referenced check code prefixes, to power plugin inference.
// Extract all referenced rule code prefixes, to power plugin inference.
let mut referenced_codes: BTreeSet<RuleCodePrefix> = BTreeSet::default();
for (key, value) in flake8 {
if let Some(value) = value {
@@ -435,6 +435,7 @@ mod tests {
src: None,
target_version: None,
unfixable: None,
typing_modules: None,
task_tags: None,
update_check: None,
flake8_annotations: None,
@@ -499,6 +500,7 @@ mod tests {
src: None,
target_version: None,
unfixable: None,
typing_modules: None,
task_tags: None,
update_check: None,
flake8_annotations: None,
@@ -563,6 +565,7 @@ mod tests {
src: None,
target_version: None,
unfixable: None,
typing_modules: None,
task_tags: None,
update_check: None,
flake8_annotations: None,
@@ -627,6 +630,7 @@ mod tests {
src: None,
target_version: None,
unfixable: None,
typing_modules: None,
task_tags: None,
update_check: None,
flake8_annotations: None,
@@ -691,6 +695,7 @@ mod tests {
src: None,
target_version: None,
unfixable: None,
typing_modules: None,
task_tags: None,
update_check: None,
flake8_annotations: None,
@@ -764,6 +769,7 @@ mod tests {
src: None,
target_version: None,
unfixable: None,
typing_modules: None,
task_tags: None,
update_check: None,
flake8_annotations: None,
@@ -831,6 +837,7 @@ mod tests {
src: None,
target_version: None,
unfixable: None,
typing_modules: None,
task_tags: None,
update_check: None,
flake8_annotations: None,

View File

@@ -1,6 +1,11 @@
import { useCallback, useEffect, useState } from "react";
import { DEFAULT_PYTHON_SOURCE } from "../constants";
import init, { check, Check, currentVersion, defaultSettings } from "../pkg";
import init, {
check,
Diagnostic,
currentVersion,
defaultSettings,
} from "../pkg";
import { ErrorMessage } from "./ErrorMessage";
import Header from "./Header";
import { useTheme } from "./theme";
@@ -18,7 +23,7 @@ export default function Editor() {
const [edit, setEdit] = useState<number>(0);
const [settingsSource, setSettingsSource] = useState<string | null>(null);
const [pythonSource, setPythonSource] = useState<string | null>(null);
const [checks, setChecks] = useState<Check[]>([]);
const [diagnostics, setDiagnostics] = useState<Diagnostic[]>([]);
const [error, setError] = useState<string | null>(null);
const [theme, setTheme] = useTheme();
@@ -32,25 +37,25 @@ export default function Editor() {
}
let config: any;
let checks: Check[];
let diagnostics: Diagnostic[];
try {
config = JSON.parse(settingsSource);
} catch (e) {
setChecks([]);
setDiagnostics([]);
setError((e as Error).message);
return;
}
try {
checks = check(pythonSource, config);
diagnostics = check(pythonSource, config);
} catch (e) {
setError(e as string);
return;
}
setError(null);
setChecks(checks);
setDiagnostics(diagnostics);
}, [initialized, settingsSource, pythonSource]);
useEffect(() => {
@@ -122,7 +127,7 @@ export default function Editor() {
visible={tab === "Source"}
source={pythonSource}
theme={theme}
checks={checks}
diagnostics={diagnostics}
onChange={handlePythonSourceChange}
/>
<SettingsEditor

View File

@@ -5,19 +5,19 @@
import Editor, { useMonaco } from "@monaco-editor/react";
import { MarkerSeverity, MarkerTag } from "monaco-editor";
import { useCallback, useEffect } from "react";
import { Check } from "../pkg";
import { Diagnostic } from "../pkg";
import { Theme } from "./theme";
export default function SourceEditor({
visible,
source,
theme,
checks,
diagnostics,
onChange,
}: {
visible: boolean;
source: string;
checks: Check[];
diagnostics: Diagnostic[];
theme: Theme;
onChange: (pythonSource: string) => void;
}) {
@@ -33,15 +33,15 @@ export default function SourceEditor({
editor.setModelMarkers(
model,
"owner",
checks.map((check) => ({
startLineNumber: check.location.row,
startColumn: check.location.column + 1,
endLineNumber: check.end_location.row,
endColumn: check.end_location.column + 1,
message: `${check.code}: ${check.message}`,
diagnostics.map((diagnostic) => ({
startLineNumber: diagnostic.location.row,
startColumn: diagnostic.location.column + 1,
endLineNumber: diagnostic.end_location.row,
endColumn: diagnostic.end_location.column + 1,
message: `${diagnostic.code}: ${diagnostic.message}`,
severity: MarkerSeverity.Error,
tags:
check.code === "F401" || check.code === "F841"
diagnostic.code === "F401" || diagnostic.code === "F841"
? [MarkerTag.Unnecessary]
: [],
})),
@@ -52,7 +52,7 @@ export default function SourceEditor({
{
// @ts-expect-error: The type definition is wrong.
provideCodeActions: function (model, position) {
const actions = checks
const actions = diagnostics
.filter((check) => position.startLineNumber === check.location.row)
.filter((check) => check.fix)
.map((check) => ({
@@ -89,7 +89,7 @@ export default function SourceEditor({
return () => {
codeActionProvider?.dispose();
};
}, [checks, monaco]);
}, [diagnostics, monaco]);
const handleChange = useCallback(
(value: string | undefined) => {

View File

@@ -4,7 +4,7 @@ build-backend = "maturin"
[project]
name = "ruff"
version = "0.0.216"
version = "0.0.218"
description = "An extremely fast Python linter, written in Rust."
authors = [
{ name = "Charlie Marsh", email = "charlie.r.marsh@gmail.com" },

View File

@@ -0,0 +1,6 @@
from pysnmp.hlapi import CommunityData
CommunityData("public", mpModel=0) # S508
CommunityData("public", mpModel=1) # S508
CommunityData("public", mpModel=2) # OK

View File

@@ -0,0 +1,7 @@
from pysnmp.hlapi import UsmUserData
insecure = UsmUserData("securityName") # S509
auth_no_priv = UsmUserData("securityName", "authName") # S509
less_insecure = UsmUserData("securityName", "authName", "privName") # OK

View File

@@ -25,10 +25,10 @@ for x in range(3):
def check_inside_functions_too():
ls = [lambda: x for x in range(2)]
st = {lambda: x for x in range(2)}
gn = (lambda: x for x in range(2))
dt = {x: lambda: x for x in range(2)}
ls = [lambda: x for x in range(2)] # error
st = {lambda: x for x in range(2)} # error
gn = (lambda: x for x in range(2)) # error
dt = {x: lambda: x for x in range(2)} # error
async def pointless_async_iterable():
@@ -37,9 +37,9 @@ async def pointless_async_iterable():
async def container_for_problems():
async for x in pointless_async_iterable():
functions.append(lambda: x)
functions.append(lambda: x) # error
[lambda: x async for x in pointless_async_iterable()]
[lambda: x async for x in pointless_async_iterable()] # error
a = 10
@@ -47,10 +47,10 @@ b = 0
while True:
a = a_ = a - 1
b += 1
functions.append(lambda: a)
functions.append(lambda: a_)
functions.append(lambda: b)
functions.append(lambda: c) # not a name error because of late binding!
functions.append(lambda: a) # error
functions.append(lambda: a_) # error
functions.append(lambda: b) # error
functions.append(lambda: c) # error, but not a name error due to late binding
c: bool = a > 3
if not c:
break
@@ -58,7 +58,7 @@ while True:
# Nested loops should not duplicate reports
for j in range(2):
for k in range(3):
lambda: j * k
lambda: j * k # error
for j, k, l in [(1, 2, 3)]:
@@ -80,3 +80,95 @@ for var in range(2):
for i in range(3):
lambda: f"{i}"
# `query` is defined in the function, so also defining it in the loop should be OK.
for name in ["a", "b"]:
query = name
def myfunc(x):
query = x
query_post = x
_ = query
_ = query_post
query_post = name # in case iteration order matters
# Bug here because two dict comprehensions reference `name`, one of which is inside
# the lambda. This should be totally fine, of course.
_ = {
k: v
for k, v in reduce(
lambda data, event: merge_mappings(
[data, {name: f(caches, data, event) for name, f in xx}]
),
events,
{name: getattr(group, name) for name in yy},
).items()
if k in backfill_fields
}
# OK to define lambdas if they're immediately consumed, typically as the `key=`
# argument or in a consumed `filter()` (even if a comprehension is better style)
for x in range(2):
# It's not a complete get-out-of-linting-free construct - these should fail:
min([None, lambda: x], key=repr)
sorted([None, lambda: x], key=repr)
any(filter(bool, [None, lambda: x]))
list(filter(bool, [None, lambda: x]))
all(reduce(bool, [None, lambda: x]))
# But all these should be OK:
min(range(3), key=lambda y: x * y)
max(range(3), key=lambda y: x * y)
sorted(range(3), key=lambda y: x * y)
any(map(lambda y: x < y, range(3)))
all(map(lambda y: x < y, range(3)))
set(map(lambda y: x < y, range(3)))
list(map(lambda y: x < y, range(3)))
tuple(map(lambda y: x < y, range(3)))
sorted(map(lambda y: x < y, range(3)))
frozenset(map(lambda y: x < y, range(3)))
any(filter(lambda y: x < y, range(3)))
all(filter(lambda y: x < y, range(3)))
set(filter(lambda y: x < y, range(3)))
list(filter(lambda y: x < y, range(3)))
tuple(filter(lambda y: x < y, range(3)))
sorted(filter(lambda y: x < y, range(3)))
frozenset(filter(lambda y: x < y, range(3)))
any(reduce(lambda y: x | y, range(3)))
all(reduce(lambda y: x | y, range(3)))
set(reduce(lambda y: x | y, range(3)))
list(reduce(lambda y: x | y, range(3)))
tuple(reduce(lambda y: x | y, range(3)))
sorted(reduce(lambda y: x | y, range(3)))
frozenset(reduce(lambda y: x | y, range(3)))
import functools
any(functools.reduce(lambda y: x | y, range(3)))
all(functools.reduce(lambda y: x | y, range(3)))
set(functools.reduce(lambda y: x | y, range(3)))
list(functools.reduce(lambda y: x | y, range(3)))
tuple(functools.reduce(lambda y: x | y, range(3)))
sorted(functools.reduce(lambda y: x | y, range(3)))
frozenset(functools.reduce(lambda y: x | y, range(3)))
# OK because the lambda which references a loop variable is defined in a `return`
# statement, and after we return the loop variable can't be redefined.
# In principle we could do something fancy with `break`, but it's not worth it.
def iter_f(names):
for name in names:
if exists(name):
return lambda: name if exists(name) else None
if foo(name):
return [lambda: name] # known false alarm
if False:
return [lambda: i for i in range(3)] # error

View File

@@ -29,3 +29,20 @@ else:
b = 1
else:
b = 2
import sys
if sys.version_info >= (3, 9):
randbytes = random.randbytes
else:
randbytes = _get_random_bytes
if sys.platform == "darwin":
randbytes = random.randbytes
else:
randbytes = _get_random_bytes
if sys.platform.startswith("linux"):
randbytes = random.randbytes
else:
randbytes = _get_random_bytes

View File

@@ -0,0 +1,19 @@
import os
# Bad
os.environ['foo']
os.environ.get('foo')
os.environ.get('foo', 'bar')
os.getenv('foo')
# Good
os.environ['FOO']
os.environ.get('FOO')
os.environ.get('FOO', 'bar')
os.getenv('FOO')

View File

@@ -0,0 +1,3 @@
#!/usr/bin/env python3
x = 1

View File

@@ -0,0 +1,3 @@
"""Hello, world!"""
x = 1

View File

@@ -0,0 +1 @@
"""Hello, world!"""

View File

@@ -0,0 +1,2 @@
"""Hello, world!"""; x = \
1; y = 2

View File

@@ -0,0 +1 @@
"""Hello, world!"""; x = 1

View File

@@ -0,0 +1,2 @@
from __future__ import generator_stop
import os

View File

@@ -0,0 +1,7 @@
from typing import Union
from airflow.typing_compat import Literal, Optional
X = Union[Literal[False], Literal["db"]]
y = Optional["Class"]

View File

@@ -0,0 +1,36 @@
# Invalid calls; errors expected.
"{0}" "{1}" "{2}".format(1, 2, 3)
"a {3} complicated {1} string with {0} {2}".format(
"first", "second", "third", "fourth"
)
'{0}'.format(1)
'{0:x}'.format(30)
x = '{0}'.format(1)
'''{0}\n{1}\n'''.format(1, 2)
x = "foo {0}" \
"bar {1}".format(1, 2)
("{0}").format(1)
"\N{snowman} {0}".format(1)
'{' '0}'.format(1)
# These will not change because we are waiting for libcst to fix this issue:
# https://github.com/Instagram/LibCST/issues/846
print(
'foo{0}'
'bar{1}'.format(1, 2)
)
print(
'foo{0}' # ohai\n"
'bar{1}'.format(1, 2)
)

View File

@@ -0,0 +1,23 @@
# Valid calls; no errors expected.
'{}'.format(1)
x = ('{0} {1}',)
'{0} {0}'.format(1)
'{0:<{1}}'.format(1, 4)
f"{0}".format(a)
f"{0}".format(1)
print(f"{0}".format(1))
# I did not include the following tests because ruff does not seem to work with
# invalid python syntax (which is a good thing)
# "{0}"format(1)
# '{'.format(1)", "'}'.format(1)
# ("{0}" # {1}\n"{2}").format(1, 2, 3)

View File

@@ -33,7 +33,7 @@
]
},
"dummy-variable-rgx": {
"description": "A regular expression used to identify \"dummy\" variables, or those which should be ignored when evaluating (e.g.) unused-variable checks. The default expression matches `_`, `__`, and `_var`, but not `_var_`.",
"description": "A regular expression used to identify \"dummy\" variables, or those which should be ignored when enforcing (e.g.) unused-variable rules. The default expression matches `_`, `__`, and `_var`, but not `_var_`.",
"type": [
"string",
"null"
@@ -67,7 +67,7 @@
}
},
"extend-ignore": {
"description": "A list of check code prefixes to ignore, in addition to those specified by `ignore`.",
"description": "A list of rule codes or prefixes to ignore, in addition to those specified by `ignore`.",
"type": [
"array",
"null"
@@ -77,7 +77,7 @@
}
},
"extend-select": {
"description": "A list of check code prefixes to enable, in addition to those specified by `select`.",
"description": "A list of rule codes or prefixes to enable, in addition to those specified by `select`.",
"type": [
"array",
"null"
@@ -87,7 +87,7 @@
}
},
"external": {
"description": "A list of check codes that are unsupported by Ruff, but should be preserved when (e.g.) validating `# noqa` directives. Useful for retaining `# noqa` directives that cover plugins not yet implemented by Ruff.",
"description": "A list of rule codes that are unsupported by Ruff, but should be preserved when (e.g.) validating `# noqa` directives. Useful for retaining `# noqa` directives that cover plugins not yet implemented by Ruff.",
"type": [
"array",
"null"
@@ -111,7 +111,7 @@
]
},
"fixable": {
"description": "A list of check code prefixes to consider autofix-able.",
"description": "A list of rule codes or prefixes to consider autofixable.",
"type": [
"array",
"null"
@@ -238,7 +238,7 @@
]
},
"ignore": {
"description": "A list of check code prefixes to ignore. Prefixes can specify exact checks (like `F841`), entire categories (like `F`), or anything in between.\n\nWhen breaking ties between enabled and disabled checks (via `select` and `ignore`, respectively), more specific prefixes override less specific prefixes.",
"description": "A list of rule codes or prefixes to ignore. Prefixes can specify exact rules (like `F841`), entire categories (like `F`), or anything in between.\n\nWhen breaking ties between enabled and disabled rules (via `select` and `ignore`, respectively), more specific prefixes override less specific prefixes.",
"type": [
"array",
"null"
@@ -297,7 +297,7 @@
]
},
"per-file-ignores": {
"description": "A list of mappings from file pattern to check code prefixes to exclude, when considering any matching files.",
"description": "A list of mappings from file pattern to rule codes or prefixes to exclude, when considering any matching files.",
"type": [
"object",
"null"
@@ -361,7 +361,7 @@
]
},
"select": {
"description": "A list of check code prefixes to enable. Prefixes can specify exact checks (like `F841`), entire categories (like `F`), or anything in between.\n\nWhen breaking ties between enabled and disabled checks (via `select` and `ignore`, respectively), more specific prefixes override less specific prefixes.",
"description": "A list of rule codes or prefixes to enable. Prefixes can specify exact rules (like `F841`), entire categories (like `F`), or anything in between.\n\nWhen breaking ties between enabled and disabled rules (via `select` and `ignore`, respectively), more specific prefixes override less specific prefixes.",
"type": [
"array",
"null"
@@ -371,7 +371,7 @@
}
},
"show-source": {
"description": "Whether to show source code snippets when reporting lint error violations (overridden by the `--show-source` command-line flag).",
"description": "Whether to show source code snippets when reporting lint violations (overridden by the `--show-source` command-line flag).",
"type": [
"boolean",
"null"
@@ -399,7 +399,17 @@
]
},
"task-tags": {
"description": "A list of task tags to recognize (e.g., \"TODO\", \"FIXME\", \"XXX\").\n\nComments starting with these tags will be ignored by commented-out code detection (`ERA`), and skipped by line-length checks (`E501`) if `ignore-overlong-task-comments` is set to `true`.",
"description": "A list of task tags to recognize (e.g., \"TODO\", \"FIXME\", \"XXX\").\n\nComments starting with these tags will be ignored by commented-out code detection (`ERA`), and skipped by line-length rules (`E501`) if `ignore-overlong-task-comments` is set to `true`.",
"type": [
"array",
"null"
],
"items": {
"type": "string"
}
},
"typing-modules": {
"description": "A list of modules whose imports should be treated equivalently to members of the `typing` module.\n\nThis is useful for ensuring proper type annotation inference for projects that re-export `typing` and `typing_extensions` members from a compatibility module. If omitted, any members imported from modules apart from `typing` and `typing_extensions` will be treated as ordinary Python objects.",
"type": [
"array",
"null"
@@ -409,7 +419,7 @@
}
},
"unfixable": {
"description": "A list of check code prefixes to consider un-autofix-able.",
"description": "A list of rule codes or prefixes to consider non-autofix-able.",
"type": [
"array",
"null"
@@ -484,14 +494,14 @@
]
},
"suppress-dummy-args": {
"description": "Whether to suppress `ANN000`-level errors for arguments matching the \"dummy\" variable regex (like `_`).",
"description": "Whether to suppress `ANN000`-level violations for arguments matching the \"dummy\" variable regex (like `_`).",
"type": [
"boolean",
"null"
]
},
"suppress-none-returning": {
"description": "Whether to suppress `ANN200`-level errors for functions that meet either of the following criteria:\n\n- Contain no `return` statement. - Explicit `return` statement(s) all return `None` (explicitly or implicitly).",
"description": "Whether to suppress `ANN200`-level violations for functions that meet either of the following criteria:\n\n- Contain no `return` statement. - Explicit `return` statement(s) all return `None` (explicitly or implicitly).",
"type": [
"boolean",
"null"
@@ -530,7 +540,7 @@
"type": "object",
"properties": {
"extend-immutable-calls": {
"description": "Additional callable functions to consider \"immutable\" when evaluating, e.g., `no-mutable-default-argument` checks (`B006`).",
"description": "Additional callable functions to consider \"immutable\" when evaluating, e.g., the `no-mutable-default-argument` rule (`B006`).",
"type": [
"array",
"null"
@@ -587,14 +597,14 @@
"type": "object",
"properties": {
"fixture-parentheses": {
"description": "Boolean flag specifying whether `@pytest.fixture()` without parameters should have parentheses. If the option is set to `true` (the default), `@pytest.fixture()` is valid and `@pytest.fixture` is an error. If set to `false`, `@pytest.fixture` is valid and `@pytest.fixture()` is an error.",
"description": "Boolean flag specifying whether `@pytest.fixture()` without parameters should have parentheses. If the option is set to `true` (the default), `@pytest.fixture()` is valid and `@pytest.fixture` is invalid. If set to `false`, `@pytest.fixture` is valid and `@pytest.fixture()` is invalid.",
"type": [
"boolean",
"null"
]
},
"mark-parentheses": {
"description": "Boolean flag specifying whether `@pytest.mark.foo()` without parameters should have parentheses. If the option is set to `true` (the default), `@pytest.mark.foo()` is valid and `@pytest.mark.foo` is an error. If set to `false`, `@pytest.fixture` is valid and `@pytest.mark.foo()` is an error.",
"description": "Boolean flag specifying whether `@pytest.mark.foo()` without parameters should have parentheses. If the option is set to `true` (the default), `@pytest.mark.foo()` is valid and `@pytest.mark.foo` is invalid. If set to `false`, `@pytest.fixture` is valid and `@pytest.mark.foo()` is invalid.",
"type": [
"boolean",
"null"
@@ -717,7 +727,7 @@
]
},
"banned-api": {
"description": "Specific modules or module members that may not be imported or accessed. Note that this check is only meant to flag accidental uses, and can be circumvented via `eval` or `importlib`.",
"description": "Specific modules or module members that may not be imported or accessed. Note that this rule is only meant to flag accidental uses, and can be circumvented via `eval` or `importlib`.",
"type": [
"object",
"null"
@@ -810,6 +820,16 @@
"null"
]
},
"required-imports": {
"description": "Add the specified import line to all files.",
"type": [
"array",
"null"
],
"items": {
"type": "string"
}
},
"single-line-exclusions": {
"description": "One or more modules to exclude from the single line rule.",
"type": [
@@ -920,7 +940,7 @@
"type": "object",
"properties": {
"ignore-overlong-task-comments": {
"description": "Whether or not line-length checks (`E501`) should be triggered for comments starting with `task-tags` (by default: [\"TODO\", \"FIXME\", and \"XXX\"]).",
"description": "Whether or not line-length violations (`E501`) should be triggered for comments starting with `task-tags` (by default: [\"TODO\", \"FIXME\", and \"XXX\"]).",
"type": [
"boolean",
"null"
@@ -1258,6 +1278,7 @@
"I0",
"I00",
"I001",
"I002",
"I2",
"I25",
"I252",
@@ -1483,6 +1504,8 @@
"S50",
"S501",
"S506",
"S508",
"S509",
"SIM",
"SIM1",
"SIM10",
@@ -1496,6 +1519,7 @@
"SIM11",
"SIM110",
"SIM111",
"SIM112",
"SIM117",
"SIM118",
"SIM2",
@@ -1582,6 +1606,8 @@
"UP027",
"UP028",
"UP029",
"UP03",
"UP030",
"W",
"W2",
"W29",

View File

@@ -1,8 +1,12 @@
[package]
name = "ruff_dev"
version = "0.0.216"
version = "0.0.218"
edition = "2021"
[lib]
name = "ruff_dev"
doctest = false
[dependencies]
anyhow = { version = "1.0.66" }
clap = { version = "4.0.1", features = ["derive"] }

View File

@@ -1,10 +1,11 @@
[package]
name = "ruff_macros"
version = "0.0.216"
version = "0.0.218"
edition = "2021"
[lib]
proc-macro = true
doctest = false
[dependencies]
once_cell = { version = "1.17.0" }

View File

@@ -12,9 +12,12 @@
)]
#![forbid(unsafe_code)]
use syn::{parse_macro_input, DeriveInput};
use proc_macro2::Span;
use quote::quote;
use syn::{parse_macro_input, DeriveInput, Ident};
mod config;
mod prefixes;
mod rule_code_prefix;
#[proc_macro_derive(ConfigurationOptions, attributes(option, doc, option_group))]
@@ -34,3 +37,23 @@ pub fn derive_rule_code_prefix(input: proc_macro::TokenStream) -> proc_macro::To
.unwrap_or_else(syn::Error::into_compile_error)
.into()
}
#[proc_macro]
pub fn origin_by_code(item: proc_macro::TokenStream) -> proc_macro::TokenStream {
let ident = parse_macro_input!(item as Ident).to_string();
let mut iter = prefixes::PREFIX_TO_ORIGIN.iter();
let origin = loop {
let (prefix, origin) = iter
.next()
.unwrap_or_else(|| panic!("code doesn't start with any recognized prefix: {ident}"));
if ident.starts_with(prefix) {
break origin;
}
};
let prefix = Ident::new(origin, Span::call_site());
quote! {
RuleOrigin::#prefix
}
.into()
}

View File

@@ -0,0 +1,53 @@
// Longer prefixes should come first so that you can find an origin for a code
// by simply picking the first entry that starts with the given prefix.
pub const PREFIX_TO_ORIGIN: &[(&str, &str)] = &[
("ANN", "Flake8Annotations"),
("ARG", "Flake8UnusedArguments"),
("A", "Flake8Builtins"),
("BLE", "Flake8BlindExcept"),
("B", "Flake8Bugbear"),
("C4", "Flake8Comprehensions"),
("C9", "McCabe"),
("DTZ", "Flake8Datetimez"),
("D", "Pydocstyle"),
("ERA", "Eradicate"),
("EM", "Flake8ErrMsg"),
("E", "Pycodestyle"),
("FBT", "Flake8BooleanTrap"),
("F", "Pyflakes"),
("ICN", "Flake8ImportConventions"),
("ISC", "Flake8ImplicitStrConcat"),
("I", "Isort"),
("N", "PEP8Naming"),
("PD", "PandasVet"),
("PGH", "PygrepHooks"),
("PL", "Pylint"),
("PT", "Flake8PytestStyle"),
("Q", "Flake8Quotes"),
("RET", "Flake8Return"),
("SIM", "Flake8Simplify"),
("S", "Flake8Bandit"),
("T10", "Flake8Debugger"),
("T20", "Flake8Print"),
("TID", "Flake8TidyImports"),
("UP", "Pyupgrade"),
("W", "Pycodestyle"),
("YTT", "Flake82020"),
("PIE", "Flake8Pie"),
("RUF", "Ruff"),
];
#[cfg(test)]
mod tests {
use super::PREFIX_TO_ORIGIN;
#[test]
fn order() {
for (idx, (prefix, _)) in PREFIX_TO_ORIGIN.iter().enumerate() {
for (prior_prefix, _) in PREFIX_TO_ORIGIN[..idx].iter() {
assert!(!prefix.starts_with(prior_prefix));
}
}
}
}

View File

@@ -1,139 +0,0 @@
#!/usr/bin/env python3
"""Generate boilerplate for a new check.
Example usage:
python scripts/add_check.py \
--name PreferListBuiltin \
--code PIE807 \
--plugin flake8-pie
"""
import argparse
import os
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def dir_name(plugin: str) -> str:
return plugin.replace("-", "_")
def pascal_case(plugin: str) -> str:
"""Convert from snake-case to PascalCase."""
return "".join(word.title() for word in plugin.split("-"))
def snake_case(name: str) -> str:
"""Convert from PascalCase to snake_case."""
return "".join(f"_{word.lower()}" if word.isupper() else word for word in name).lstrip("_")
def main(*, name: str, code: str, plugin: str) -> None:
# Create a test fixture.
with open(
os.path.join(ROOT_DIR, f"resources/test/fixtures/{dir_name(plugin)}/{code}.py"),
"a",
):
pass
# Add the relevant `#testcase` macro.
with open(os.path.join(ROOT_DIR, f"src/{dir_name(plugin)}/mod.rs"), "r") as fp:
content = fp.read()
with open(os.path.join(ROOT_DIR, f"src/{dir_name(plugin)}/mod.rs"), "w") as fp:
for line in content.splitlines():
if line.strip() == "fn rules(check_code: RuleCode, path: &Path) -> Result<()> {":
indent = line.split("fn rules(check_code: RuleCode, path: &Path) -> Result<()> {")[0]
fp.write(f'{indent}#[test_case(RuleCode::{code}, Path::new("{code}.py"); "{code}")]')
fp.write("\n")
fp.write(line)
fp.write("\n")
# Add the relevant plugin function.
with open(os.path.join(ROOT_DIR, f"src/{dir_name(plugin)}/plugins.rs"), "a") as fp:
fp.write(
f"""
/// {code}
pub fn {snake_case(name)}(checker: &mut Checker) {{}}
"""
)
fp.write("\n")
# Add the relevant sections to `src/registry.rs`.
with open(os.path.join(ROOT_DIR, "src/registry.rs"), "r") as fp:
content = fp.read()
index = 0
with open(os.path.join(ROOT_DIR, "src/registry.rs"), "w") as fp:
for line in content.splitlines():
fp.write(line)
fp.write("\n")
if line.strip() == f"// {plugin}":
if index == 0:
# `RuleCode` definition
indent = line.split(f"// {plugin}")[0]
fp.write(f"{indent}{code},")
fp.write("\n")
elif index == 1:
# `DiagnosticKind` definition
indent = line.split(f"// {plugin}")[0]
fp.write(f"{indent}{name},")
fp.write("\n")
elif index == 2:
# `RuleCode#kind()`
indent = line.split(f"// {plugin}")[0]
fp.write(f"{indent}RuleCode::{code} => DiagnosticKind::{name},")
fp.write("\n")
elif index == 3:
# `RuleCode#category()`
indent = line.split(f"// {plugin}")[0]
fp.write(f"{indent}RuleCode::{code} => CheckCategory::{pascal_case(plugin)},")
fp.write("\n")
elif index == 4:
# `DiagnosticKind#code()`
indent = line.split(f"// {plugin}")[0]
fp.write(f"{indent}DiagnosticKind::{name} => &RuleCode::{code},")
fp.write("\n")
elif index == 5:
# `RuleCode#body`
indent = line.split(f"// {plugin}")[0]
fp.write(f'{indent}DiagnosticKind::{name} => todo!("Write message body for {code}"),')
fp.write("\n")
index += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate boilerplate for a new check.",
epilog="python scripts/add_check.py --name PreferListBuiltin --code PIE807 --plugin flake8-pie",
)
parser.add_argument(
"--name",
type=str,
required=True,
help="The name of the check to generate, in PascalCase (e.g., 'LineTooLong').",
)
parser.add_argument(
"--code",
type=str,
required=True,
help="The code of the check to generate (e.g., 'A001').",
)
parser.add_argument(
"--plugin",
type=str,
required=True,
help="The plugin with which the check is associated (e.g., 'flake8-builtins').",
)
args = parser.parse_args()
main(name=args.name, code=args.code, plugin=args.plugin)

View File

@@ -1,5 +1,5 @@
#!/usr/bin/env python3
"""Generate boilerplate for a new plugin.
"""Generate boilerplate for a new Flake8 plugin.
Example usage:
@@ -31,9 +31,9 @@ def main(*, plugin: str, url: str) -> None:
# Create the Rust module.
os.makedirs(os.path.join(ROOT_DIR, f"src/{dir_name(plugin)}"), exist_ok=True)
with open(os.path.join(ROOT_DIR, f"src/{dir_name(plugin)}/rules"), "a"):
pass
with open(os.path.join(ROOT_DIR, f"src/{dir_name(plugin)}/rules"), "w+") as fp:
with open(os.path.join(ROOT_DIR, f"src/{dir_name(plugin)}/rules.rs"), "w+") as fp:
fp.write("use crate::checkers::ast::Checker;\n")
with open(os.path.join(ROOT_DIR, f"src/{dir_name(plugin)}/mod.rs"), "w+") as fp:
fp.write("pub mod rules;\n")
fp.write("\n")
fp.write(
@@ -49,13 +49,13 @@ mod tests {
use crate::linter::test_path;
use crate::settings;
fn rules(check_code: RuleCode, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", check_code.as_ref(), path.to_string_lossy());
fn rules(rule_code: RuleCode, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.as_ref(), path.to_string_lossy());
let diagnostics =test_path(
Path::new("./resources/test/fixtures/%s")
.join(path)
.as_path(),
&settings::Settings::for_rule(check_code),
&settings::Settings::for_rule(rule_code),
)?;
insta::assert_yaml_snapshot!(snapshot, diagnostics);
Ok(())
@@ -67,10 +67,10 @@ mod tests {
# Add the plugin to `lib.rs`.
with open(os.path.join(ROOT_DIR, "src/lib.rs"), "a") as fp:
fp.write(f"pub mod {dir_name(plugin)};")
fp.write(f"mod {dir_name(plugin)};")
# Add the relevant sections to `src/registry.rs`.
with open(os.path.join(ROOT_DIR, "src/registry.rs"), "r") as fp:
with open(os.path.join(ROOT_DIR, "src/registry.rs")) as fp:
content = fp.read()
with open(os.path.join(ROOT_DIR, "src/registry.rs"), "w") as fp:
@@ -85,23 +85,37 @@ mod tests {
fp.write(f"{indent}{pascal_case(plugin)},")
fp.write("\n")
elif line.strip() == 'CheckCategory::Ruff => "Ruff-specific rules",':
indent = line.split('CheckCategory::Ruff => "Ruff-specific rules",')[0]
fp.write(f'{indent}CheckCategory::{pascal_case(plugin)} => "{plugin}",')
elif line.strip() == 'RuleOrigin::Ruff => "Ruff-specific rules",':
indent = line.split('RuleOrigin::Ruff => "Ruff-specific rules",')[0]
fp.write(f'{indent}RuleOrigin::{pascal_case(plugin)} => "{plugin}",')
fp.write("\n")
elif line.strip() == "CheckCategory::Ruff => vec![RuleCodePrefix::RUF],":
indent = line.split("CheckCategory::Ruff => vec![RuleCodePrefix::RUF],")[0]
elif line.strip() == "RuleOrigin::Ruff => vec![RuleCodePrefix::RUF],":
indent = line.split("RuleOrigin::Ruff => vec![RuleCodePrefix::RUF],")[0]
fp.write(
f"{indent}CheckCategory::{pascal_case(plugin)} => vec![\n"
f"{indent}RuleOrigin::{pascal_case(plugin)} => vec![\n"
f'{indent} todo!("Fill-in prefix after generating codes")\n'
f"{indent}],"
)
fp.write("\n")
elif line.strip() == "CheckCategory::Ruff => None,":
indent = line.split("CheckCategory::Ruff => None,")[0]
fp.write(f"{indent}CheckCategory::{pascal_case(plugin)} => " f'Some(("{url}", &Platform::PyPI)),')
elif line.strip() == "RuleOrigin::Ruff => None,":
indent = line.split("RuleOrigin::Ruff => None,")[0]
fp.write(f"{indent}RuleOrigin::{pascal_case(plugin)} => " f'Some(("{url}", &Platform::PyPI)),')
fp.write("\n")
fp.write(line)
fp.write("\n")
# Add the relevant section to `src/violations.rs`.
with open(os.path.join(ROOT_DIR, "src/violations.rs")) as fp:
content = fp.read()
with open(os.path.join(ROOT_DIR, "src/violations.rs"), "w") as fp:
for line in content.splitlines():
if line.strip() == "// Ruff":
indent = line.split("// Ruff")[0]
fp.write(f"{indent}// {plugin}")
fp.write("\n")
fp.write(line)
@@ -110,7 +124,7 @@ mod tests {
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate boilerplate for a new plugin.",
description="Generate boilerplate for a new Flake8 plugin.",
epilog=(
"Example usage: python scripts/add_plugin.py flake8-pie "
"--url https://pypi.org/project/flake8-pie/0.16.0/"
@@ -118,7 +132,6 @@ if __name__ == "__main__":
)
parser.add_argument(
"plugin",
required=True,
type=str,
help="The name of the plugin to generate.",
)

145
scripts/add_rule.py Normal file
View File

@@ -0,0 +1,145 @@
#!/usr/bin/env python3
"""Generate boilerplate for a new rule.
Example usage:
python scripts/add_rule.py \
--name PreferListBuiltin \
--code PIE807 \
--origin flake8-pie
"""
import argparse
import os
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def dir_name(origin: str) -> str:
    """Return the Rust module directory name for an origin (dashes become underscores)."""
    return "_".join(origin.split("-"))
def pascal_case(origin: str) -> str:
    """Convert a dash-separated name (e.g., `flake8-pie`) to PascalCase (`Flake8Pie`).

    Note: the input is dash-separated (the plugin/origin naming convention),
    not snake_case — the previous docstring was misleading.
    """
    return "".join(word.title() for word in origin.split("-"))
def snake_case(name: str) -> str:
    """Convert from PascalCase to snake_case."""
    # Walk the name character by character, prefixing each uppercase letter
    # with an underscore, then strip the leading underscore from the result.
    pieces: list = []
    for ch in name:
        if ch.isupper():
            pieces.append("_" + ch.lower())
        else:
            pieces.append(ch)
    return "".join(pieces).lstrip("_")
def main(*, name: str, code: str, origin: str) -> None:
    """Generate boilerplate for a new rule.

    Creates an empty test fixture, registers a `#[test_case(...)]` macro,
    appends a rule-function stub, and wires the rule into `src/violations.rs`
    and `src/registry.rs`. Files are rewritten in place by reading them fully,
    then re-emitting each line with the new content inserted at marker lines.
    """
    # Create a test fixture. Opening in append mode creates the file if it
    # does not exist, without truncating an existing fixture.
    with open(
        os.path.join(ROOT_DIR, f"resources/test/fixtures/{dir_name(origin)}/{code}.py"),
        "a",
    ):
        pass
    # Add the relevant `#testcase` macro.
    with open(os.path.join(ROOT_DIR, f"src/{dir_name(origin)}/mod.rs")) as fp:
        content = fp.read()
    with open(os.path.join(ROOT_DIR, f"src/{dir_name(origin)}/mod.rs"), "w") as fp:
        for line in content.splitlines():
            # Insert the `#[test_case(...)]` attribute directly above the
            # `fn rules(...)` test function, preserving its indentation.
            if line.strip() == "fn rules(rule_code: RuleCode, path: &Path) -> Result<()> {":
                indent = line.split("fn rules(rule_code: RuleCode, path: &Path) -> Result<()> {")[0]
                fp.write(f'{indent}#[test_case(RuleCode::{code}, Path::new("{code}.py"); "{code}")]')
                fp.write("\n")
            fp.write(line)
            fp.write("\n")
    # Add the relevant rule function.
    with open(os.path.join(ROOT_DIR, f"src/{dir_name(origin)}/rules.rs"), "a") as fp:
        fp.write(
            f"""
/// {code}
pub fn {snake_case(name)}(checker: &mut Checker) {{}}
"""
        )
        fp.write("\n")
    # Add the relevant struct to `src/violations.rs`.
    with open(os.path.join(ROOT_DIR, "src/violations.rs")) as fp:
        content = fp.read()
    with open(os.path.join(ROOT_DIR, "src/violations.rs"), "w") as fp:
        for line in content.splitlines():
            fp.write(line)
            fp.write("\n")
            # The origin's section is marked by a `// {origin}` comment;
            # append the violation stub immediately below it.
            if line.startswith(f"// {origin}"):
                fp.write(
                    """define_violation!(
    pub struct %s;
);
impl Violation for %s {
    fn message(&self) -> String {
        todo!("Implement message")
    }
    fn placeholder() -> Self {
        %s
    }
}
"""
                    % (name, name, name)
                )
                fp.write("\n")
    # Add the relevant code-to-violation pair to `src/registry.rs`.
    with open(os.path.join(ROOT_DIR, "src/registry.rs")) as fp:
        content = fp.read()
    seen_macro = False
    has_written = False
    with open(os.path.join(ROOT_DIR, "src/registry.rs"), "w") as fp:
        for line in content.splitlines():
            fp.write(line)
            fp.write("\n")
            # Only one insertion is needed; once written, just echo lines.
            if has_written:
                continue
            # Only start matching the `// {origin}` marker once inside the
            # `define_rule_mapping!` macro (the same marker can appear elsewhere).
            if line.startswith("define_rule_mapping!"):
                seen_macro = True
                continue
            if not seen_macro:
                continue
            if line.strip() == f"// {origin}":
                indent = line.split("//")[0]
                fp.write(f"{indent}{code} => violations::{name},")
                fp.write("\n")
                has_written = True
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Generate boilerplate for a new rule.",
        epilog="python scripts/add_rule.py --name PreferListBuiltin --code PIE807 --origin flake8-pie",
    )
    # The rule name, in PascalCase (used for the Rust struct and function names).
    parser.add_argument(
        "--name",
        type=str,
        required=True,
        help="The name of the check to generate, in PascalCase (e.g., 'LineTooLong').",
    )
    # The rule code: a prefix plus a number.
    parser.add_argument(
        "--code",
        type=str,
        required=True,
        help="The code of the check to generate (e.g., 'A001').",
    )
    # The origin the rule comes from; determines the target `src/` directory.
    parser.add_argument(
        "--origin",
        type=str,
        required=True,
        help="The source with which the check originated (e.g., 'flake8-builtins').",
    )
    args = parser.parse_args()
    main(name=args.name, code=args.code, origin=args.origin)

View File

@@ -179,6 +179,131 @@ pub fn match_call_path(
}
}
/// Return `true` if the `Expr` contains a reference to `${module}.${member}`.
///
/// Aliases are resolved via `import_aliases`, and `from ... import ...`
/// bindings via `from_imports`, before matching. The check is applied to
/// every sub-expression via `any_over_expr`.
pub fn contains_call_path(
    expr: &Expr,
    module: &str,
    member: &str,
    import_aliases: &FxHashMap<&str, &str>,
    from_imports: &FxHashMap<&str, FxHashSet<&str>>,
) -> bool {
    any_over_expr(expr, &|expr| {
        // An empty call path means this sub-expression is not a dotted
        // name/attribute chain, so it can't match.
        let call_path = collect_call_paths(expr);
        if !call_path.is_empty() {
            if match_call_path(
                &dealias_call_path(call_path, import_aliases),
                module,
                member,
                from_imports,
            ) {
                return true;
            }
        }
        false
    })
}
/// Call `func` over every `Expr` in `expr`, returning `true` if any expression
/// returns `true`.
pub fn any_over_expr<F>(expr: &Expr, func: &F) -> bool
where
F: Fn(&Expr) -> bool,
{
if func(expr) {
return true;
}
match &expr.node {
ExprKind::BoolOp { values, .. } | ExprKind::JoinedStr { values } => {
values.iter().any(|expr| any_over_expr(expr, func))
}
ExprKind::NamedExpr { target, value } => {
any_over_expr(target, func) || any_over_expr(value, func)
}
ExprKind::BinOp { left, right, .. } => {
any_over_expr(left, func) || any_over_expr(right, func)
}
ExprKind::UnaryOp { operand, .. } => any_over_expr(operand, func),
ExprKind::Lambda { body, .. } => any_over_expr(body, func),
ExprKind::IfExp { test, body, orelse } => {
any_over_expr(test, func) || any_over_expr(body, func) || any_over_expr(orelse, func)
}
ExprKind::Dict { keys, values } => values
.iter()
.chain(keys.iter())
.any(|expr| any_over_expr(expr, func)),
ExprKind::Set { elts } | ExprKind::List { elts, .. } | ExprKind::Tuple { elts, .. } => {
elts.iter().any(|expr| any_over_expr(expr, func))
}
ExprKind::ListComp { elt, generators }
| ExprKind::SetComp { elt, generators }
| ExprKind::GeneratorExp { elt, generators } => {
any_over_expr(elt, func)
|| generators.iter().any(|generator| {
any_over_expr(&generator.target, func)
|| any_over_expr(&generator.iter, func)
|| generator.ifs.iter().any(|expr| any_over_expr(expr, func))
})
}
ExprKind::DictComp {
key,
value,
generators,
} => {
any_over_expr(key, func)
|| any_over_expr(value, func)
|| generators.iter().any(|generator| {
any_over_expr(&generator.target, func)
|| any_over_expr(&generator.iter, func)
|| generator.ifs.iter().any(|expr| any_over_expr(expr, func))
})
}
ExprKind::Await { value }
| ExprKind::YieldFrom { value }
| ExprKind::Attribute { value, .. }
| ExprKind::Starred { value, .. } => any_over_expr(value, func),
ExprKind::Yield { value } => value
.as_ref()
.map_or(false, |value| any_over_expr(value, func)),
ExprKind::Compare {
left, comparators, ..
} => any_over_expr(left, func) || comparators.iter().any(|expr| any_over_expr(expr, func)),
ExprKind::Call {
func: call_func,
args,
keywords,
} => {
any_over_expr(call_func, func)
|| args.iter().any(|expr| any_over_expr(expr, func))
|| keywords
.iter()
.any(|keyword| any_over_expr(&keyword.node.value, func))
}
ExprKind::FormattedValue {
value, format_spec, ..
} => {
any_over_expr(value, func)
|| format_spec
.as_ref()
.map_or(false, |value| any_over_expr(value, func))
}
ExprKind::Subscript { value, slice, .. } => {
any_over_expr(value, func) || any_over_expr(slice, func)
}
ExprKind::Slice { lower, upper, step } => {
lower
.as_ref()
.map_or(false, |value| any_over_expr(value, func))
|| upper
.as_ref()
.map_or(false, |value| any_over_expr(value, func))
|| step
.as_ref()
.map_or(false, |value| any_over_expr(value, func))
}
ExprKind::Name { .. } | ExprKind::Constant { .. } => false,
}
}
static DUNDER_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"__[^\s]+__").unwrap());
/// Return `true` if the `Stmt` is an assignment to a dunder (like `__all__`).
@@ -191,12 +316,12 @@ pub fn is_assignment_to_a_dunder(stmt: &Stmt) -> bool {
return false;
}
match &targets[0].node {
ExprKind::Name { id, ctx: _ } => DUNDER_REGEX.is_match(id),
ExprKind::Name { id, .. } => DUNDER_REGEX.is_match(id),
_ => false,
}
}
StmtKind::AnnAssign { target, .. } => match &target.node {
ExprKind::Name { id, ctx: _ } => DUNDER_REGEX.is_match(id),
ExprKind::Name { id, .. } => DUNDER_REGEX.is_match(id),
_ => false,
},
_ => false,
@@ -589,6 +714,21 @@ pub fn followed_by_multi_statement_line(stmt: &Stmt, locator: &SourceCodeLocator
match_trailing_content(stmt, locator)
}
/// Return `true` if a `Stmt` is a docstring (i.e., an expression statement
/// whose value is a string constant).
pub fn is_docstring_stmt(stmt: &Stmt) -> bool {
    match &stmt.node {
        StmtKind::Expr { value } => matches!(
            value.node,
            ExprKind::Constant {
                value: Constant::Str { .. },
                ..
            }
        ),
        _ => false,
    }
}
#[derive(Default)]
/// A simple representation of a call's positional and keyword arguments.
pub struct SimpleCallArgs<'a> {
@@ -634,6 +774,11 @@ impl<'a> SimpleCallArgs<'a> {
}
None
}
    /// Get the number of positional and keyword arguments used.
    ///
    /// Counts entries in both `args` (positional) and `kwargs` (keyword).
    pub fn len(&self) -> usize {
        self.args.len() + self.kwargs.len()
    }
}
#[cfg(test)]

View File

@@ -4,6 +4,7 @@ use rustpython_parser::ast::{Constant, Expr, ExprKind, Stmt, StmtKind};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use crate::ast::helpers::any_over_expr;
use crate::ast::types::{Binding, BindingKind, Scope};
use crate::ast::visitor;
use crate::ast::visitor::Visitor;
@@ -129,76 +130,13 @@ pub fn in_nested_block<'a>(mut parents: impl Iterator<Item = &'a Stmt>) -> bool
})
}
/// Returns `true` if `parent` contains `child`.
fn contains(parent: &Expr, child: &Expr) -> bool {
match &parent.node {
ExprKind::BoolOp { values, .. } => values.iter().any(|parent| contains(parent, child)),
ExprKind::NamedExpr { target, value } => contains(target, child) || contains(value, child),
ExprKind::BinOp { left, right, .. } => contains(left, child) || contains(right, child),
ExprKind::UnaryOp { operand, .. } => contains(operand, child),
ExprKind::Lambda { body, .. } => contains(body, child),
ExprKind::IfExp { test, body, orelse } => {
contains(test, child) || contains(body, child) || contains(orelse, child)
}
ExprKind::Dict { keys, values } => keys
.iter()
.chain(values.iter())
.any(|parent| contains(parent, child)),
ExprKind::Set { elts } => elts.iter().any(|parent| contains(parent, child)),
ExprKind::ListComp { elt, .. } => contains(elt, child),
ExprKind::SetComp { elt, .. } => contains(elt, child),
ExprKind::DictComp { key, value, .. } => contains(key, child) || contains(value, child),
ExprKind::GeneratorExp { elt, .. } => contains(elt, child),
ExprKind::Await { value } => contains(value, child),
ExprKind::Yield { value } => value.as_ref().map_or(false, |value| contains(value, child)),
ExprKind::YieldFrom { value } => contains(value, child),
ExprKind::Compare {
left, comparators, ..
} => contains(left, child) || comparators.iter().any(|parent| contains(parent, child)),
ExprKind::Call {
func,
args,
keywords,
} => {
contains(func, child)
|| args.iter().any(|parent| contains(parent, child))
|| keywords
.iter()
.any(|keyword| contains(&keyword.node.value, child))
}
ExprKind::FormattedValue {
value, format_spec, ..
} => {
contains(value, child)
|| format_spec
.as_ref()
.map_or(false, |value| contains(value, child))
}
ExprKind::JoinedStr { values } => values.iter().any(|parent| contains(parent, child)),
ExprKind::Constant { .. } => false,
ExprKind::Attribute { value, .. } => contains(value, child),
ExprKind::Subscript { value, slice, .. } => {
contains(value, child) || contains(slice, child)
}
ExprKind::Starred { value, .. } => contains(value, child),
ExprKind::Name { .. } => parent == child,
ExprKind::List { elts, .. } => elts.iter().any(|parent| contains(parent, child)),
ExprKind::Tuple { elts, .. } => elts.iter().any(|parent| contains(parent, child)),
ExprKind::Slice { lower, upper, step } => {
lower.as_ref().map_or(false, |value| contains(value, child))
|| upper.as_ref().map_or(false, |value| contains(value, child))
|| step.as_ref().map_or(false, |value| contains(value, child))
}
}
}
/// Check if a node represents an unpacking assignment.
pub fn is_unpacking_assignment(parent: &Stmt, child: &Expr) -> bool {
match &parent.node {
StmtKind::With { items, .. } => items.iter().any(|item| {
if let Some(optional_vars) = &item.optional_vars {
if matches!(optional_vars.node, ExprKind::Tuple { .. }) {
if contains(optional_vars, child) {
if any_over_expr(optional_vars, &|expr| expr == child) {
return true;
}
}
@@ -227,7 +165,7 @@ pub fn is_unpacking_assignment(parent: &Stmt, child: &Expr) -> bool {
matches!(
item.node,
ExprKind::Set { .. } | ExprKind::List { .. } | ExprKind::Tuple { .. }
) && contains(item, child)
) && any_over_expr(item, &|expr| expr == child)
});
// If our child is a tuple, and value is not, it's always an unpacking

View File

@@ -174,9 +174,26 @@ impl<'a> Checker<'a> {
/// Return `true` if the call path is a reference to `typing.${target}`.
pub fn match_typing_call_path(&self, call_path: &[&str], target: &str) -> bool {
match_call_path(call_path, "typing", target, &self.from_imports)
|| (typing::in_extensions(target)
&& match_call_path(call_path, "typing_extensions", target, &self.from_imports))
if match_call_path(call_path, "typing", target, &self.from_imports) {
return true;
}
if typing::TYPING_EXTENSIONS.contains(target) {
if match_call_path(call_path, "typing_extensions", target, &self.from_imports) {
return true;
}
}
if self
.settings
.typing_modules
.iter()
.any(|module| match_call_path(call_path, module, target, &self.from_imports))
{
return true;
}
false
}
/// Return the current `Binding` for a given `name`.
@@ -1388,6 +1405,9 @@ where
if self.settings.enabled.contains(&RuleCode::B015) {
flake8_bugbear::rules::useless_comparison(self, value);
}
if self.settings.enabled.contains(&RuleCode::SIM112) {
flake8_simplify::rules::use_capital_environment_variables(self, value);
}
}
_ => {}
}
@@ -1810,6 +1830,8 @@ where
|| self.settings.enabled.contains(&RuleCode::F523)
|| self.settings.enabled.contains(&RuleCode::F524)
|| self.settings.enabled.contains(&RuleCode::F525)
// pyupgrade
|| self.settings.enabled.contains(&RuleCode::UP030)
{
if let ExprKind::Attribute { value, attr, .. } = &func.node {
if let ExprKind::Constant {
@@ -1856,6 +1878,10 @@ where
self, &summary, location,
);
}
if self.settings.enabled.contains(&RuleCode::UP030) {
pyupgrade::rules::format_literals(self, &summary, expr);
}
}
}
}
@@ -1971,6 +1997,28 @@ where
self.diagnostics.push(diagnostic);
}
}
if self.settings.enabled.contains(&RuleCode::S508) {
if let Some(diagnostic) = flake8_bandit::rules::snmp_insecure_version(
func,
args,
keywords,
&self.from_imports,
&self.import_aliases,
) {
self.diagnostics.push(diagnostic);
}
}
if self.settings.enabled.contains(&RuleCode::S509) {
if let Some(diagnostic) = flake8_bandit::rules::snmp_weak_cryptography(
func,
args,
keywords,
&self.from_imports,
&self.import_aliases,
) {
self.diagnostics.push(diagnostic);
}
}
if self.settings.enabled.contains(&RuleCode::S106) {
self.diagnostics
.extend(flake8_bandit::rules::hardcoded_password_func_arg(keywords));
@@ -2954,6 +3002,7 @@ where
value,
&self.from_imports,
&self.import_aliases,
self.settings.typing_modules.iter().map(String::as_str),
|member| self.is_builtin(member),
) {
Some(subscript) => {

View File

@@ -7,33 +7,12 @@ use rustpython_parser::ast::Suite;
use crate::ast::visitor::Visitor;
use crate::directives::IsortDirectives;
use crate::isort;
use crate::isort::track::ImportTracker;
use crate::registry::Diagnostic;
use crate::isort::track::{Block, ImportTracker};
use crate::registry::{Diagnostic, RuleCode};
use crate::settings::{flags, Settings};
use crate::source_code_locator::SourceCodeLocator;
use crate::source_code_style::SourceCodeStyleDetector;
fn check_import_blocks(
tracker: ImportTracker,
locator: &SourceCodeLocator,
settings: &Settings,
stylist: &SourceCodeStyleDetector,
autofix: flags::Autofix,
package: Option<&Path>,
) -> Vec<Diagnostic> {
let mut diagnostics = vec![];
for block in tracker.into_iter() {
if !block.imports.is_empty() {
if let Some(diagnostic) =
isort::rules::check_imports(&block, locator, settings, stylist, autofix, package)
{
diagnostics.push(diagnostic);
}
}
}
diagnostics
}
#[allow(clippy::too_many_arguments)]
pub fn check_imports(
python_ast: &Suite,
@@ -45,9 +24,33 @@ pub fn check_imports(
path: &Path,
package: Option<&Path>,
) -> Vec<Diagnostic> {
let mut tracker = ImportTracker::new(locator, directives, path);
for stmt in python_ast {
tracker.visit_stmt(stmt);
// Extract all imports from the AST.
let tracker = {
let mut tracker = ImportTracker::new(locator, directives, path);
for stmt in python_ast {
tracker.visit_stmt(stmt);
}
tracker
};
let blocks: Vec<&Block> = tracker.iter().collect();
// Enforce import rules.
let mut diagnostics = vec![];
if settings.enabled.contains(&RuleCode::I001) {
for block in &blocks {
if !block.imports.is_empty() {
if let Some(diagnostic) = isort::rules::organize_imports(
block, locator, settings, stylist, autofix, package,
) {
diagnostics.push(diagnostic);
}
}
}
}
check_import_blocks(tracker, locator, settings, stylist, autofix, package)
if settings.enabled.contains(&RuleCode::I002) {
diagnostics.extend(isort::rules::add_required_imports(
&blocks, python_ast, locator, settings, autofix,
));
}
diagnostics
}

View File

@@ -47,7 +47,7 @@ pub fn check_noqa(
continue;
}
// Is the check ignored by a `noqa` directive on the parent line?
// Is the violation ignored by a `noqa` directive on the parent line?
if let Some(parent_lineno) = diagnostic.parent.map(|location| location.row()) {
let noqa_lineno = noqa_line_for.get(&parent_lineno).unwrap_or(&parent_lineno);
if commented_lines.contains(noqa_lineno) {

View File

@@ -25,26 +25,26 @@ pub struct Cli {
/// Enable verbose logging.
#[arg(short, long, group = "verbosity")]
pub verbose: bool,
/// Only log errors.
/// Print lint violations, but nothing else.
#[arg(short, long, group = "verbosity")]
pub quiet: bool,
/// Disable all logging (but still exit with status code "1" upon detecting
/// errors).
/// lint violations).
#[arg(short, long, group = "verbosity")]
pub silent: bool,
/// Exit with status code "0", even upon detecting errors.
/// Exit with status code "0", even upon detecting lint violations.
#[arg(short, long)]
pub exit_zero: bool,
/// Run in watch mode by re-running whenever files change.
#[arg(short, long)]
pub watch: bool,
/// Attempt to automatically fix lint errors.
/// Attempt to automatically fix lint violations.
#[arg(long, overrides_with("no_fix"))]
fix: bool,
#[clap(long, overrides_with("fix"), hide = true)]
no_fix: bool,
/// Fix any fixable lint errors, but don't report on leftover violations.
/// Implies `--fix`.
/// Fix any fixable lint violations, but don't report on leftover
/// violations. Implies `--fix`.
#[arg(long, overrides_with("no_fix_only"))]
fix_only: bool,
#[clap(long, overrides_with("fix_only"), hide = true)]
@@ -63,36 +63,36 @@ pub struct Cli {
/// rules).
#[arg(long, value_delimiter = ',')]
pub select: Option<Vec<RuleCodePrefix>>,
/// Like --select, but adds additional error codes on top of the selected
/// Like --select, but adds additional rule codes on top of the selected
/// ones.
#[arg(long, value_delimiter = ',')]
pub extend_select: Option<Vec<RuleCodePrefix>>,
/// Comma-separated list of error codes to disable.
/// Comma-separated list of rule codes to disable.
#[arg(long, value_delimiter = ',')]
pub ignore: Option<Vec<RuleCodePrefix>>,
/// Like --ignore, but adds additional error codes on top of the ignored
/// Like --ignore, but adds additional rule codes on top of the ignored
/// ones.
#[arg(long, value_delimiter = ',')]
pub extend_ignore: Option<Vec<RuleCodePrefix>>,
/// List of paths, used to exclude files and/or directories from checks.
/// List of paths, used to omit files and/or directories from analysis.
#[arg(long, value_delimiter = ',')]
pub exclude: Option<Vec<FilePattern>>,
/// Like --exclude, but adds additional files and directories on top of the
/// excluded ones.
/// Like --exclude, but adds additional files and directories on top of
/// those already excluded.
#[arg(long, value_delimiter = ',')]
pub extend_exclude: Option<Vec<FilePattern>>,
/// List of error codes to treat as eligible for autofix. Only applicable
/// List of rule codes to treat as eligible for autofix. Only applicable
/// when autofix itself is enabled (e.g., via `--fix`).
#[arg(long, value_delimiter = ',')]
pub fixable: Option<Vec<RuleCodePrefix>>,
/// List of error codes to treat as ineligible for autofix. Only applicable
/// List of rule codes to treat as ineligible for autofix. Only applicable
/// when autofix itself is enabled (e.g., via `--fix`).
#[arg(long, value_delimiter = ',')]
pub unfixable: Option<Vec<RuleCodePrefix>>,
/// List of mappings from file pattern to code to exclude
#[arg(long, value_delimiter = ',')]
pub per_file_ignores: Option<Vec<PatternPrefixPair>>,
/// Output serialization format for error messages.
/// Output serialization format for violations.
#[arg(long, value_enum, env = "RUFF_FORMAT")]
pub format: Option<SerializationFormat>,
/// The name of the file when passing it through stdin.
@@ -129,7 +129,7 @@ pub struct Cli {
/// The minimum Python version that should be supported.
#[arg(long)]
pub target_version: Option<PythonVersion>,
/// Set the line-length for length-associated checks and automatic
/// Set the line-length for length-associated rules and automatic
/// formatting.
#[arg(long)]
pub line_length: Option<usize>,
@@ -212,7 +212,7 @@ pub struct Cli {
conflicts_with = "watch",
)]
pub show_files: bool,
/// See the settings Ruff will use to check a given Python file.
/// See the settings Ruff will use to lint a given Python file.
#[arg(
long,
// Fake subcommands.

View File

@@ -1,5 +1,7 @@
use anyhow::{bail, Result};
use libcst_native::{Expr, Import, ImportFrom, Module, SmallStatement, Statement};
use libcst_native::{
Call, Expr, Expression, Import, ImportFrom, Module, SmallStatement, Statement,
};
pub fn match_module(module_text: &str) -> Result<Module> {
match libcst_native::parse_module(module_text, None) {
@@ -8,6 +10,13 @@ pub fn match_module(module_text: &str) -> Result<Module> {
}
}
/// Parse `expression_text` into a libcst `Expression`, or error if the text
/// is not a valid standalone expression. The parse error detail is discarded.
pub fn match_expression(expression_text: &str) -> Result<Expression> {
    match libcst_native::parse_expression(expression_text) {
        Ok(expression) => Ok(expression),
        Err(_) => bail!("Failed to extract CST from source"),
    }
}
pub fn match_expr<'a, 'b>(module: &'a mut Module<'b>) -> Result<&'a mut Expr<'b>> {
if let Some(Statement::Simple(expr)) = module.body.first_mut() {
if let Some(SmallStatement::Expr(expr)) = expr.body.first_mut() {
@@ -43,3 +52,11 @@ pub fn match_import_from<'a, 'b>(module: &'a mut Module<'b>) -> Result<&'a mut I
bail!("Expected Statement::Simple")
}
}
/// Extract a mutable `Call` from an `Expression`, or error if the expression
/// is not a call.
pub fn match_call<'a, 'b>(expression: &'a mut Expression<'b>) -> Result<&'a mut Call<'b>> {
    if let Expression::Call(call) = expression {
        Ok(call)
    } else {
        // Previously reported "Expected SmallStatement::Expr" — a copy-paste
        // from `match_expr` — which misidentified the expected variant.
        bail!("Expected Expression::Call")
    }
}

View File

@@ -33,6 +33,7 @@ impl Flags {
pub struct IsortDirectives {
pub exclusions: IntSet<usize>,
pub splits: Vec<usize>,
pub skip_file: bool,
}
pub struct Directives {
@@ -89,17 +90,11 @@ pub fn extract_noqa_line_for(lxr: &[LexResult]) -> IntMap<usize, usize> {
pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
let mut exclusions: IntSet<usize> = IntSet::default();
let mut splits: Vec<usize> = Vec::default();
let mut skip_file: bool = false;
let mut off: Option<Location> = None;
let mut last: Option<Location> = None;
for &(start, ref tok, end) in lxr.iter().flatten() {
last = Some(end);
// No need to keep processing, but we do need to determine the last token.
if skip_file {
continue;
}
let Tok::Comment(comment_text) = tok else {
continue;
};
@@ -111,7 +106,10 @@ pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
if comment_text == "# isort: split" {
splits.push(start.row());
} else if comment_text == "# isort: skip_file" || comment_text == "# isort:skip_file" {
skip_file = true;
return IsortDirectives {
skip_file: true,
..IsortDirectives::default()
};
} else if off.is_some() {
if comment_text == "# isort: on" {
if let Some(start) = off {
@@ -130,14 +128,7 @@ pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
}
}
if skip_file {
// Enforce `isort: skip_file`.
if let Some(end) = last {
for row in 1..=end.row() {
exclusions.insert(row);
}
}
} else if let Some(start) = off {
if let Some(start) = off {
// Enforce unterminated `isort: off`.
if let Some(end) = last {
for row in start.row() + 1..=end.row() {
@@ -145,7 +136,11 @@ pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives {
}
}
}
IsortDirectives { exclusions, splits }
IsortDirectives {
exclusions,
splits,
..IsortDirectives::default()
}
}
#[cfg(test)]
@@ -283,10 +278,7 @@ x = 1
y = 2
z = x + 1";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
assert_eq!(
extract_isort_directives(&lxr).exclusions,
IntSet::from_iter([1, 2, 3, 4])
);
assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default());
let contents = "# isort: off
x = 1
@@ -295,10 +287,7 @@ y = 2
# isort: skip_file
z = x + 1";
let lxr: Vec<LexResult> = lexer::make_tokenizer(contents).collect();
assert_eq!(
extract_isort_directives(&lxr).exclusions,
IntSet::from_iter([1, 2, 3, 4, 5, 6])
);
assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default());
}
#[test]

View File

@@ -26,7 +26,7 @@ pub struct Options {
value_type = "bool",
example = "suppress-dummy-args = true"
)]
/// Whether to suppress `ANN000`-level errors for arguments matching the
/// Whether to suppress `ANN000`-level violations for arguments matching the
/// "dummy" variable regex (like `_`).
pub suppress_dummy_args: Option<bool>,
#[option(
@@ -34,8 +34,8 @@ pub struct Options {
value_type = "bool",
example = "suppress-none-returning = true"
)]
/// Whether to suppress `ANN200`-level errors for functions that meet either
/// of the following criteria:
/// Whether to suppress `ANN200`-level violations for functions that meet
/// either of the following criteria:
///
/// - Contain no `return` statement.
/// - Explicit `return` statement(s) all return `None` (explicitly or

View File

@@ -25,6 +25,8 @@ mod tests {
#[test_case(RuleCode::S324, Path::new("S324.py"); "S324")]
#[test_case(RuleCode::S501, Path::new("S501.py"); "S501")]
#[test_case(RuleCode::S506, Path::new("S506.py"); "S506")]
#[test_case(RuleCode::S508, Path::new("S508.py"); "S508")]
#[test_case(RuleCode::S509, Path::new("S509.py"); "S509")]
fn rules(rule_code: RuleCode, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.as_ref(), path.to_string_lossy());
let diagnostics = test_path(

View File

@@ -11,6 +11,8 @@ pub use hardcoded_tmp_directory::hardcoded_tmp_directory;
pub use hashlib_insecure_hash_functions::hashlib_insecure_hash_functions;
pub use request_with_no_cert_validation::request_with_no_cert_validation;
pub use request_without_timeout::request_without_timeout;
pub use snmp_insecure_version::snmp_insecure_version;
pub use snmp_weak_cryptography::snmp_weak_cryptography;
pub use unsafe_yaml_load::unsafe_yaml_load;
mod assert_used;
@@ -24,4 +26,6 @@ mod hardcoded_tmp_directory;
mod hashlib_insecure_hash_functions;
mod request_with_no_cert_validation;
mod request_without_timeout;
mod snmp_insecure_version;
mod snmp_weak_cryptography;
mod unsafe_yaml_load;

View File

@@ -0,0 +1,40 @@
use num_traits::{One, Zero};
use rustc_hash::{FxHashMap, FxHashSet};
use rustpython_ast::{Expr, ExprKind, Keyword};
use rustpython_parser::ast::Constant;
use crate::ast::helpers::{collect_call_paths, dealias_call_path, match_call_path, SimpleCallArgs};
use crate::ast::types::Range;
use crate::registry::Diagnostic;
use crate::violations;
/// S508: flag `pysnmp.hlapi.CommunityData(...)` calls whose `mpModel` keyword
/// is a literal integer `0` or `1`.
///
/// NOTE(review): `mpModel` 0/1 presumably select SNMP v1/v2c — confirm
/// against the pysnmp documentation.
pub fn snmp_insecure_version(
    func: &Expr,
    args: &[Expr],
    keywords: &[Keyword],
    from_imports: &FxHashMap<&str, FxHashSet<&str>>,
    import_aliases: &FxHashMap<&str, &str>,
) -> Option<Diagnostic> {
    // Resolve import aliases before matching the dotted call path.
    let call_path = dealias_call_path(collect_call_paths(func), import_aliases);
    if match_call_path(&call_path, "pysnmp.hlapi", "CommunityData", from_imports) {
        let call_args = SimpleCallArgs::new(args, keywords);
        // Only a literal integer `mpModel` argument is inspected; computed
        // values are not flagged.
        if let Some(mp_model_arg) = call_args.get_argument("mpModel", None) {
            if let ExprKind::Constant {
                value: Constant::Int(value),
                ..
            } = &mp_model_arg.node
            {
                if value.is_zero() || value.is_one() {
                    // The diagnostic is anchored to the `mpModel` argument,
                    // not the whole call.
                    return Some(Diagnostic::new(
                        violations::SnmpInsecureVersion,
                        Range::from_located(mp_model_arg),
                    ));
                }
            }
        }
    }
    None
}

View File

@@ -0,0 +1,30 @@
use rustc_hash::{FxHashMap, FxHashSet};
use rustpython_ast::{Expr, Keyword};
use crate::ast::helpers::{collect_call_paths, dealias_call_path, match_call_path, SimpleCallArgs};
use crate::ast::types::Range;
use crate::registry::Diagnostic;
use crate::violations;
/// S509: flag `pysnmp.hlapi.UsmUserData` calls constructed with fewer than
/// three arguments (positional and keyword combined) — presumably omitting
/// authentication and/or privacy keys; mirrors the upstream bandit rule.
pub fn snmp_weak_cryptography(
    func: &Expr,
    args: &[Expr],
    keywords: &[Keyword],
    from_imports: &FxHashMap<&str, FxHashSet<&str>>,
    import_aliases: &FxHashMap<&str, &str>,
) -> Option<Diagnostic> {
    // Resolve the call target through any import aliases before matching.
    let call_path = dealias_call_path(collect_call_paths(func), import_aliases);
    if !match_call_path(&call_path, "pysnmp.hlapi", "UsmUserData", from_imports) {
        return None;
    }

    let call_args = SimpleCallArgs::new(args, keywords);
    // Fewer than three total arguments triggers the diagnostic on the callee.
    (call_args.len() < 3).then(|| {
        Diagnostic::new(
            violations::SnmpWeakCryptography,
            Range::from_located(func),
        )
    })
}

View File

@@ -0,0 +1,25 @@
---
source: src/flake8_bandit/mod.rs
expression: diagnostics
---
- kind:
SnmpInsecureVersion: ~
location:
row: 3
column: 32
end_location:
row: 3
column: 33
fix: ~
parent: ~
- kind:
SnmpInsecureVersion: ~
location:
row: 4
column: 32
end_location:
row: 4
column: 33
fix: ~
parent: ~

View File

@@ -0,0 +1,25 @@
---
source: src/flake8_bandit/mod.rs
expression: diagnostics
---
- kind:
SnmpWeakCryptography: ~
location:
row: 4
column: 11
end_location:
row: 4
column: 22
fix: ~
parent: ~
- kind:
SnmpWeakCryptography: ~
location:
row: 5
column: 15
end_location:
row: 5
column: 26
fix: ~
parent: ~

View File

@@ -12,7 +12,9 @@ use crate::violations;
#[derive(Default)]
struct LoadedNamesVisitor<'a> {
// Tuple of: name, defining expression, and defining range.
names: Vec<(&'a str, &'a Expr, Range)>,
loaded: Vec<(&'a str, &'a Expr, Range)>,
// Tuple of: name, defining expression, and defining range.
stored: Vec<(&'a str, &'a Expr, Range)>,
}
/// `Visitor` to collect all used identifiers in a statement.
@@ -22,12 +24,11 @@ where
{
fn visit_expr(&mut self, expr: &'b Expr) {
match &expr.node {
ExprKind::JoinedStr { .. } => {
visitor::walk_expr(self, expr);
}
ExprKind::Name { id, ctx } if matches!(ctx, ExprContext::Load) => {
self.names.push((id, expr, Range::from_located(expr)));
}
ExprKind::Name { id, ctx } => match ctx {
ExprContext::Load => self.loaded.push((id, expr, Range::from_located(expr))),
ExprContext::Store => self.stored.push((id, expr, Range::from_located(expr))),
ExprContext::Del => {}
},
_ => visitor::walk_expr(self, expr),
}
}
@@ -36,6 +37,7 @@ where
#[derive(Default)]
struct SuspiciousVariablesVisitor<'a> {
names: Vec<(&'a str, &'a Expr, Range)>,
safe_functions: Vec<&'a Expr>,
}
/// `Visitor` to collect all suspicious variables (those referenced in
@@ -50,45 +52,90 @@ where
| StmtKind::AsyncFunctionDef { args, body, .. } => {
// Collect all loaded variable names.
let mut visitor = LoadedNamesVisitor::default();
for stmt in body {
visitor.visit_stmt(stmt);
}
visitor.visit_body(body);
// Collect all argument names.
let arg_names = collect_arg_names(args);
let mut arg_names = collect_arg_names(args);
arg_names.extend(visitor.stored.iter().map(|(id, ..)| id));
// Treat any non-arguments as "suspicious".
self.names.extend(
visitor
.names
.into_iter()
.loaded
.iter()
.filter(|(id, ..)| !arg_names.contains(id)),
);
}
_ => visitor::walk_stmt(self, stmt),
StmtKind::Return { value: Some(value) } => {
// Mark `return lambda: x` as safe.
if matches!(value.node, ExprKind::Lambda { .. }) {
self.safe_functions.push(value);
}
}
_ => {}
}
visitor::walk_stmt(self, stmt);
}
fn visit_expr(&mut self, expr: &'b Expr) {
match &expr.node {
ExprKind::Lambda { args, body } => {
// Collect all loaded variable names.
let mut visitor = LoadedNamesVisitor::default();
visitor.visit_expr(body);
// Collect all argument names.
let arg_names = collect_arg_names(args);
// Treat any non-arguments as "suspicious".
self.names.extend(
visitor
.names
.into_iter()
.filter(|(id, ..)| !arg_names.contains(id)),
);
ExprKind::Call {
func,
args,
keywords,
} => {
if let ExprKind::Name { id, .. } = &func.node {
if id == "filter" || id == "reduce" || id == "map" {
for arg in args {
if matches!(arg.node, ExprKind::Lambda { .. }) {
self.safe_functions.push(arg);
}
}
}
}
if let ExprKind::Attribute { value, attr, .. } = &func.node {
if attr == "reduce" {
if let ExprKind::Name { id, .. } = &value.node {
if id == "functools" {
for arg in args {
if matches!(arg.node, ExprKind::Lambda { .. }) {
self.safe_functions.push(arg);
}
}
}
}
}
}
for keyword in keywords {
if keyword.node.arg.as_ref().map_or(false, |arg| arg == "key")
&& matches!(keyword.node.value.node, ExprKind::Lambda { .. })
{
self.safe_functions.push(&keyword.node.value);
}
}
}
_ => visitor::walk_expr(self, expr),
ExprKind::Lambda { args, body } => {
if !self.safe_functions.contains(&expr) {
// Collect all loaded variable names.
let mut visitor = LoadedNamesVisitor::default();
visitor.visit_expr(body);
// Collect all argument names.
let mut arg_names = collect_arg_names(args);
arg_names.extend(visitor.stored.iter().map(|(id, ..)| id));
// Treat any non-arguments as "suspicious".
self.names.extend(
visitor
.loaded
.iter()
.filter(|(id, ..)| !arg_names.contains(id)),
);
}
}
_ => {}
}
visitor::walk_expr(self, expr);
}
}

View File

@@ -22,7 +22,7 @@ pub struct Options {
"#
)]
/// Additional callable functions to consider "immutable" when evaluating,
/// e.g., `no-mutable-default-argument` checks (`B006`).
/// e.g., the `no-mutable-default-argument` rule (`B006`).
pub extend_immutable_calls: Option<Vec<String>>,
}

View File

@@ -1,6 +1,6 @@
---
source: src/flake8_bugbear/mod.rs
expression: checks
expression: diagnostics
---
- kind:
FunctionUsesLoopVariable: x
@@ -172,4 +172,74 @@ expression: checks
column: 16
fix: ~
parent: ~
- kind:
FunctionUsesLoopVariable: x
location:
row: 117
column: 23
end_location:
row: 117
column: 24
fix: ~
parent: ~
- kind:
FunctionUsesLoopVariable: x
location:
row: 118
column: 26
end_location:
row: 118
column: 27
fix: ~
parent: ~
- kind:
FunctionUsesLoopVariable: x
location:
row: 119
column: 36
end_location:
row: 119
column: 37
fix: ~
parent: ~
- kind:
FunctionUsesLoopVariable: x
location:
row: 120
column: 37
end_location:
row: 120
column: 38
fix: ~
parent: ~
- kind:
FunctionUsesLoopVariable: x
location:
row: 121
column: 36
end_location:
row: 121
column: 37
fix: ~
parent: ~
- kind:
FunctionUsesLoopVariable: name
location:
row: 171
column: 28
end_location:
row: 171
column: 32
fix: ~
parent: ~
- kind:
FunctionUsesLoopVariable: i
location:
row: 174
column: 28
end_location:
row: 174
column: 29
fix: ~
parent: ~

View File

@@ -4,7 +4,7 @@ use super::helpers::{
get_mark_decorators, get_mark_name, is_abstractmethod_decorator, is_pytest_fixture,
is_pytest_yield_fixture, keyword_is_literal,
};
use crate::ast::helpers::{collect_arg_names, collect_call_paths, identifier_range};
use crate::ast::helpers::{collect_arg_names, collect_call_paths};
use crate::ast::types::Range;
use crate::ast::visitor;
use crate::ast::visitor::Visitor;
@@ -156,33 +156,19 @@ fn check_fixture_returns(checker: &mut Checker, func: &Stmt, func_name: &str, bo
&& visitor.has_return_with_value
&& func_name.starts_with('_')
{
let mut diagnostic = Diagnostic::new(
checker.diagnostics.push(Diagnostic::new(
violations::IncorrectFixtureNameUnderscore(func_name.to_string()),
Range::from_located(func),
);
if checker.patch(diagnostic.kind.code()) {
let func_name_range = identifier_range(func, checker.locator);
let num_underscores = func_name.len() - func_name.trim_start_matches('_').len();
diagnostic.amend(Fix::deletion(
func_name_range.location,
func_name_range.location.with_col_offset(num_underscores),
));
}
checker.diagnostics.push(diagnostic);
));
} else if checker.settings.enabled.contains(&RuleCode::PT004)
&& !visitor.has_return_with_value
&& !visitor.has_yield_from
&& !func_name.starts_with('_')
{
let mut diagnostic = Diagnostic::new(
checker.diagnostics.push(Diagnostic::new(
violations::MissingFixtureNameUnderscore(func_name.to_string()),
Range::from_located(func),
);
if checker.patch(diagnostic.kind.code()) {
let func_name_range = identifier_range(func, checker.locator);
diagnostic.amend(Fix::insertion("_".to_string(), func_name_range.location));
}
checker.diagnostics.push(diagnostic);
));
}
if checker.settings.enabled.contains(&RuleCode::PT022) {

View File

@@ -36,9 +36,9 @@ pub struct Options {
)]
/// Boolean flag specifying whether `@pytest.fixture()` without parameters
/// should have parentheses. If the option is set to `true` (the
/// default), `@pytest.fixture()` is valid and `@pytest.fixture` is an
/// error. If set to `false`, `@pytest.fixture` is valid and
/// `@pytest.fixture()` is an error.
/// default), `@pytest.fixture()` is valid and `@pytest.fixture` is
/// invalid. If set to `false`, `@pytest.fixture` is valid and
/// `@pytest.fixture()` is invalid.
pub fixture_parentheses: Option<bool>,
#[option(
default = "tuple",
@@ -104,9 +104,9 @@ pub struct Options {
)]
/// Boolean flag specifying whether `@pytest.mark.foo()` without parameters
/// should have parentheses. If the option is set to `true` (the
/// default), `@pytest.mark.foo()` is valid and `@pytest.mark.foo` is an
/// error. If set to `false`, `@pytest.fixture` is valid and
/// `@pytest.mark.foo()` is an error.
/// default), `@pytest.mark.foo()` is valid and `@pytest.mark.foo` is
/// invalid. If set to `false`, `@pytest.fixture` is valid and
/// `@pytest.mark.foo()` is invalid.
pub mark_parentheses: Option<bool>,
}

View File

@@ -10,14 +10,7 @@ expression: diagnostics
end_location:
row: 52
column: 30
fix:
content: _
location:
row: 51
column: 4
end_location:
row: 51
column: 4
fix: ~
parent: ~
- kind:
MissingFixtureNameUnderscore: activate_context
@@ -27,13 +20,6 @@ expression: diagnostics
end_location:
row: 58
column: 13
fix:
content: _
location:
row: 56
column: 4
end_location:
row: 56
column: 4
fix: ~
parent: ~

View File

@@ -10,14 +10,7 @@ expression: diagnostics
end_location:
row: 42
column: 12
fix:
content: ""
location:
row: 41
column: 4
end_location:
row: 41
column: 5
fix: ~
parent: ~
- kind:
IncorrectFixtureNameUnderscore: _activate_context
@@ -27,14 +20,7 @@ expression: diagnostics
end_location:
row: 48
column: 21
fix:
content: ""
location:
row: 46
column: 4
end_location:
row: 46
column: 5
fix: ~
parent: ~
- kind:
IncorrectFixtureNameUnderscore: _activate_context
@@ -44,13 +30,6 @@ expression: diagnostics
end_location:
row: 57
column: 34
fix:
content: ""
location:
row: 52
column: 4
end_location:
row: 52
column: 5
fix: ~
parent: ~

View File

@@ -21,6 +21,7 @@ mod tests {
#[test_case(RuleCode::SIM109, Path::new("SIM109.py"); "SIM109")]
#[test_case(RuleCode::SIM110, Path::new("SIM110.py"); "SIM110")]
#[test_case(RuleCode::SIM111, Path::new("SIM111.py"); "SIM111")]
#[test_case(RuleCode::SIM112, Path::new("SIM112.py"); "SIM112")]
#[test_case(RuleCode::SIM117, Path::new("SIM117.py"); "SIM117")]
#[test_case(RuleCode::SIM201, Path::new("SIM201.py"); "SIM201")]
#[test_case(RuleCode::SIM202, Path::new("SIM202.py"); "SIM202")]

View File

@@ -0,0 +1,106 @@
use rustpython_ast::{Constant, Expr, ExprKind};
use crate::ast::helpers::{create_expr, match_module_member, unparse_expr};
use crate::ast::types::Range;
use crate::autofix::Fix;
use crate::checkers::ast::Checker;
use crate::registry::{Diagnostic, RuleCode};
use crate::violations;
/// SIM112: flag environment-variable lookups whose literal name is not
/// fully uppercase, offering an autofix that uppercases it.
pub fn use_capital_environment_variables(checker: &mut Checker, expr: &Expr) {
    // `os.environ['foo']`-style subscript accesses are handled separately.
    if matches!(&expr.node, ExprKind::Subscript { .. }) {
        check_os_environ_subscript(checker, expr);
        return;
    }

    // Handle `os.environ.get('foo')` and `os.getenv('foo')`.
    let is_env_getter = match_module_member(
        expr,
        "os.environ",
        "get",
        &checker.from_imports,
        &checker.import_aliases,
    ) || match_module_member(
        expr,
        "os",
        "getenv",
        &checker.from_imports,
        &checker.import_aliases,
    );
    if !is_env_getter {
        return;
    }

    // Only literal string first arguments are inspected.
    let ExprKind::Call { args, .. } = &expr.node else {
        return;
    };
    let Some(arg) = args.first() else {
        return;
    };
    let ExprKind::Constant { value: Constant::Str(env_var), kind } = &arg.node else {
        return;
    };

    let capital_env_var = env_var.to_ascii_uppercase();
    if &capital_env_var == env_var {
        // Already uppercase: nothing to report.
        return;
    }

    let mut diagnostic = Diagnostic::new(
        violations::UseCapitalEnvironmentVariables(capital_env_var.clone(), env_var.clone()),
        Range::from_located(arg),
    );
    if checker.patch(&RuleCode::SIM112) {
        // Rewrite the literal with the uppercased name, preserving the string
        // prefix (`kind`).
        let replacement = unparse_expr(
            &create_expr(ExprKind::Constant {
                value: capital_env_var.into(),
                kind: kind.clone(),
            }),
            checker.style,
        );
        diagnostic.amend(Fix::replacement(
            replacement,
            arg.location,
            arg.end_location.unwrap(),
        ));
    }
    checker.diagnostics.push(diagnostic);
}
/// Flag `os.environ["..."]` subscripts whose literal key is not fully
/// uppercase, offering an autofix that uppercases it.
fn check_os_environ_subscript(checker: &mut Checker, expr: &Expr) {
    // Match the `os.environ[...]` shape: a subscript whose value is the
    // `environ` attribute of the bare name `os`.
    let ExprKind::Subscript { value, slice, .. } = &expr.node else {
        return;
    };
    let ExprKind::Attribute { value: object, attr, .. } = &value.node else {
        return;
    };
    let ExprKind::Name { id, .. } = &object.node else {
        return;
    };
    if !(id == "os" && attr == "environ") {
        return;
    }

    // Only literal string keys are inspected.
    let ExprKind::Constant { value: Constant::Str(env_var), kind } = &slice.node else {
        return;
    };
    let uppercased = env_var.to_ascii_uppercase();
    if &uppercased == env_var {
        // Already uppercase: nothing to report.
        return;
    }

    let mut diagnostic = Diagnostic::new(
        violations::UseCapitalEnvironmentVariables(uppercased.clone(), env_var.clone()),
        Range::from_located(slice),
    );
    if checker.patch(&RuleCode::SIM112) {
        // Rewrite the key with the uppercased name, preserving the string
        // prefix (`kind`).
        diagnostic.amend(Fix::replacement(
            unparse_expr(
                &create_expr(ExprKind::Constant {
                    value: uppercased.into(),
                    kind: kind.clone(),
                }),
                checker.style,
            ),
            slice.location,
            slice.end_location.unwrap(),
        ));
    }
    checker.diagnostics.push(diagnostic);
}

View File

@@ -1,6 +1,8 @@
use rustpython_ast::{Constant, Expr, ExprKind, Stmt, StmtKind};
use crate::ast::helpers::{create_expr, create_stmt, unparse_expr, unparse_stmt};
use crate::ast::helpers::{
contains_call_path, create_expr, create_stmt, unparse_expr, unparse_stmt,
};
use crate::ast::types::Range;
use crate::autofix::Fix;
use crate::checkers::ast::Checker;
@@ -144,7 +146,28 @@ pub fn use_ternary_operator(checker: &mut Checker, stmt: &Stmt, parent: Option<&
return;
}
let target_var = &body_targets[0];
// Avoid suggesting ternary for `if sys.version_info >= ...`-style checks.
if contains_call_path(
test,
"sys",
"version_info",
&checker.import_aliases,
&checker.from_imports,
) {
return;
}
// Avoid suggesting ternary for `if sys.platform.startswith("...")`-style
// checks.
if contains_call_path(
test,
"sys",
"platform",
&checker.import_aliases,
&checker.from_imports,
) {
return;
}
// It's part of a bigger if-elif block:
// https://github.com/MartinThoma/flake8-simplify/issues/115
@@ -176,6 +199,7 @@ pub fn use_ternary_operator(checker: &mut Checker, stmt: &Stmt, parent: Option<&
}
}
let target_var = &body_targets[0];
let ternary = ternary(target_var, body_value, test, orelse_value);
let content = unparse_stmt(&ternary, checker.style);
let mut diagnostic = Diagnostic::new(

View File

@@ -1,6 +1,7 @@
pub use ast_bool_op::{
a_and_not_a, a_or_not_a, and_false, compare_with_tuple, duplicate_isinstance_call, or_true,
};
pub use ast_expr::use_capital_environment_variables;
pub use ast_for::convert_loop_to_any_all;
pub use ast_if::{nested_if_statements, return_bool_condition_directly, use_ternary_operator};
pub use ast_ifexp::{
@@ -14,6 +15,7 @@ pub use use_contextlib_suppress::use_contextlib_suppress;
pub use yoda_conditions::yoda_conditions;
mod ast_bool_op;
mod ast_expr;
mod ast_for;
mod ast_if;
mod ast_ifexp;

View File

@@ -0,0 +1,81 @@
---
source: src/flake8_simplify/mod.rs
expression: diagnostics
---
- kind:
UseCapitalEnvironmentVariables:
- FOO
- foo
location:
row: 4
column: 11
end_location:
row: 4
column: 16
fix:
content: "'FOO'"
location:
row: 4
column: 11
end_location:
row: 4
column: 16
parent: ~
- kind:
UseCapitalEnvironmentVariables:
- FOO
- foo
location:
row: 6
column: 15
end_location:
row: 6
column: 20
fix:
content: "'FOO'"
location:
row: 6
column: 15
end_location:
row: 6
column: 20
parent: ~
- kind:
UseCapitalEnvironmentVariables:
- FOO
- foo
location:
row: 8
column: 15
end_location:
row: 8
column: 20
fix:
content: "'FOO'"
location:
row: 8
column: 15
end_location:
row: 8
column: 20
parent: ~
- kind:
UseCapitalEnvironmentVariables:
- FOO
- foo
location:
row: 10
column: 10
end_location:
row: 10
column: 15
fix:
content: "'FOO'"
location:
row: 10
column: 10
end_location:
row: 10
column: 15
parent: ~

View File

@@ -54,7 +54,7 @@ pub struct Options {
"#
)]
/// Specific modules or module members that may not be imported or accessed.
/// Note that this check is only meant to flag accidental uses,
/// Note that this rule is only meant to flag accidental uses,
/// and can be circumvented via `eval` or `importlib`.
pub banned_api: Option<FxHashMap<String, BannedApi>>,
}

View File

@@ -1,19 +1,6 @@
use rustpython_ast::{Constant, ExprKind, Stmt, StmtKind};
/// Return `true` if a `Stmt` is a docstring.
fn is_docstring_stmt(stmt: &Stmt) -> bool {
if let StmtKind::Expr { value } = &stmt.node {
matches!(
value.node,
ExprKind::Constant {
value: Constant::Str { .. },
..
}
)
} else {
false
}
}
use crate::ast::helpers::is_docstring_stmt;
/// Return `true` if a `Stmt` is a "empty": a `pass`, `...`, `raise
/// NotImplementedError`, or `raise NotImplemented` (with or without arguments).

View File

@@ -1,7 +1,8 @@
use rustpython_ast::Stmt;
use rustpython_ast::{Location, Stmt};
use rustpython_parser::lexer;
use rustpython_parser::lexer::Tok;
use crate::ast::helpers::is_docstring_stmt;
use crate::ast::types::Range;
use crate::isort::types::TrailingComma;
use crate::source_code_locator::SourceCodeLocator;
@@ -86,3 +87,122 @@ pub fn has_comment_break(stmt: &Stmt, locator: &SourceCodeLocator) -> bool {
}
false
}
/// Return the end location of the last statement in the file's leading run of
/// docstring statements, or `None` if the file doesn't start with a docstring.
fn match_docstring_end(body: &[Stmt]) -> Option<Location> {
    // Walk the leading docstring statements and keep the last one's end.
    body.iter()
        .take_while(|stmt| is_docstring_stmt(stmt))
        .last()
        .map(|stmt| stmt.end_location.unwrap())
}
/// Find the end of the first token that isn't a docstring, comment, or
/// whitespace.
///
/// This is the point at which a new top-of-file statement (e.g., a required
/// import) can be spliced in without disturbing the file's preamble of
/// docstrings and leading comments.
pub fn find_splice_location(body: &[Stmt], locator: &SourceCodeLocator) -> Location {
    // Find the first AST node that isn't a docstring; default to the start of
    // the file when there is no leading docstring.
    let mut splice = match_docstring_end(body).unwrap_or_default();
    // Find the first token that isn't a comment or whitespace, scanning
    // forward from the docstring run: comments and newlines advance the splice
    // point, and the first "real" token stops the scan.
    let contents = locator.slice_source_code_at(&splice);
    for (.., tok, end) in lexer::make_tokenizer(&contents).flatten() {
        if matches!(tok, Tok::Comment(..) | Tok::Newline) {
            splice = end;
        } else {
            break;
        }
    }
    splice
}
#[cfg(test)]
mod tests {
use anyhow::Result;
use rustpython_ast::Location;
use rustpython_parser::parser;
use crate::isort::helpers::find_splice_location;
use crate::source_code_locator::SourceCodeLocator;
fn splice_contents(contents: &str) -> Result<Location> {
let program = parser::parse_program(contents, "<filename>")?;
let locator = SourceCodeLocator::new(contents);
Ok(find_splice_location(&program, &locator))
}
#[test]
fn splice() -> Result<()> {
let contents = "";
assert_eq!(splice_contents(contents)?, Location::new(1, 0));
let contents = r#"
"""Hello, world!"""
"#
.trim();
assert_eq!(splice_contents(contents)?, Location::new(1, 19));
let contents = r#"
"""Hello, world!"""
"""Hello, world!"""
"#
.trim();
assert_eq!(splice_contents(contents)?, Location::new(2, 19));
let contents = r#"
x = 1
"#
.trim();
assert_eq!(splice_contents(contents)?, Location::new(1, 0));
let contents = r#"
#!/usr/bin/env python3
"#
.trim();
assert_eq!(splice_contents(contents)?, Location::new(1, 22));
let contents = r#"
#!/usr/bin/env python3
"""Hello, world!"""
"#
.trim();
assert_eq!(splice_contents(contents)?, Location::new(2, 19));
let contents = r#"
"""Hello, world!"""
#!/usr/bin/env python3
"#
.trim();
assert_eq!(splice_contents(contents)?, Location::new(2, 22));
let contents = r#"
"""%s""" % "Hello, world!"
"#
.trim();
assert_eq!(splice_contents(contents)?, Location::new(1, 0));
let contents = r#"
"""Hello, world!"""; x = 1
"#
.trim();
assert_eq!(splice_contents(contents)?, Location::new(1, 19));
let contents = r#"
"""Hello, world!"""; x = 1; y = \
2
"#
.trim();
assert_eq!(splice_contents(contents)?, Location::new(1, 19));
Ok(())
}
}

View File

@@ -828,4 +828,99 @@ mod tests {
insta::assert_yaml_snapshot!(snapshot, diagnostics);
Ok(())
}
#[test_case(Path::new("docstring.py"))]
#[test_case(Path::new("docstring_only.py"))]
#[test_case(Path::new("empty.py"))]
fn required_import(path: &Path) -> Result<()> {
let snapshot = format!("required_import_{}", path.to_string_lossy());
let diagnostics = test_path(
Path::new("./resources/test/fixtures/isort/required_imports")
.join(path)
.as_path(),
&Settings {
src: vec![Path::new("resources/test/fixtures/isort").to_path_buf()],
isort: isort::settings::Settings {
required_imports: BTreeSet::from([
"from __future__ import annotations".to_string()
]),
..isort::settings::Settings::default()
},
..Settings::for_rule(RuleCode::I002)
},
)?;
insta::assert_yaml_snapshot!(snapshot, diagnostics);
Ok(())
}
#[test_case(Path::new("docstring.py"))]
#[test_case(Path::new("docstring_only.py"))]
#[test_case(Path::new("empty.py"))]
fn required_imports(path: &Path) -> Result<()> {
let snapshot = format!("required_imports_{}", path.to_string_lossy());
let diagnostics = test_path(
Path::new("./resources/test/fixtures/isort/required_imports")
.join(path)
.as_path(),
&Settings {
src: vec![Path::new("resources/test/fixtures/isort").to_path_buf()],
isort: isort::settings::Settings {
required_imports: BTreeSet::from([
"from __future__ import annotations".to_string(),
"from __future__ import generator_stop".to_string(),
]),
..isort::settings::Settings::default()
},
..Settings::for_rule(RuleCode::I002)
},
)?;
insta::assert_yaml_snapshot!(snapshot, diagnostics);
Ok(())
}
#[test_case(Path::new("docstring.py"))]
#[test_case(Path::new("docstring_only.py"))]
#[test_case(Path::new("empty.py"))]
fn combined_required_imports(path: &Path) -> Result<()> {
let snapshot = format!("combined_required_imports_{}", path.to_string_lossy());
let diagnostics = test_path(
Path::new("./resources/test/fixtures/isort/required_imports")
.join(path)
.as_path(),
&Settings {
src: vec![Path::new("resources/test/fixtures/isort").to_path_buf()],
isort: isort::settings::Settings {
required_imports: BTreeSet::from(["from __future__ import annotations, \
generator_stop"
.to_string()]),
..isort::settings::Settings::default()
},
..Settings::for_rule(RuleCode::I002)
},
)?;
insta::assert_yaml_snapshot!(snapshot, diagnostics);
Ok(())
}
#[test_case(Path::new("docstring.py"))]
#[test_case(Path::new("docstring_only.py"))]
#[test_case(Path::new("empty.py"))]
fn straight_required_import(path: &Path) -> Result<()> {
let snapshot = format!("straight_required_import_{}", path.to_string_lossy());
let diagnostics = test_path(
Path::new("./resources/test/fixtures/isort/required_imports")
.join(path)
.as_path(),
&Settings {
src: vec![Path::new("resources/test/fixtures/isort").to_path_buf()],
isort: isort::settings::Settings {
required_imports: BTreeSet::from(["import os".to_string()]),
..isort::settings::Settings::default()
},
..Settings::for_rule(RuleCode::I002)
},
)?;
insta::assert_yaml_snapshot!(snapshot, diagnostics);
Ok(())
}
}

View File

@@ -0,0 +1,222 @@
use std::fmt;
use log::error;
use rustpython_ast::{Location, StmtKind, Suite};
use crate::ast::helpers::is_docstring_stmt;
use crate::ast::types::Range;
use crate::autofix::Fix;
use crate::isort::helpers;
use crate::isort::track::Block;
use crate::registry::{Diagnostic, RuleCode};
use crate::settings::{flags, Settings};
use crate::source_code_locator::SourceCodeLocator;
use crate::violations;
/// A single imported name together with its optional `as` alias.
struct Alias<'a> {
    name: &'a str,
    as_name: Option<&'a str>,
}

/// A `from module import name` statement (possibly relative).
struct ImportFrom<'a> {
    // `None` for bare relative imports (e.g., `from . import x`).
    module: Option<&'a str>,
    name: Alias<'a>,
    // Number of leading dots for relative imports, when present.
    level: Option<&'a usize>,
}

/// A plain `import name` statement.
struct Import<'a> {
    name: Alias<'a>,
}

/// Either style of import statement that can be required by configuration.
enum AnyImport<'a> {
    Import(Import<'a>),
    ImportFrom(ImportFrom<'a>),
}
impl fmt::Display for ImportFrom<'_> {
    /// Render the import as source text (e.g., `from __future__ import
    /// annotations`). This text is what the autofix inserts, so it must
    /// round-trip every component — including the `as` alias, which the
    /// sibling `Import` rendering also emits and which `contains` matches on.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "from ")?;
        if let Some(level) = self.level {
            // Relative imports: one dot per level.
            write!(f, "{}", ".".repeat(*level))?;
        }
        if let Some(module) = self.module {
            write!(f, "{module}")?;
        }
        write!(f, " import {}", self.name.name)?;
        if let Some(as_name) = self.name.as_name {
            // Previously dropped: without this, a required
            // `from x import y as z` would be inserted without its alias and
            // thus never satisfy the `contains` check.
            write!(f, " as {as_name}")?;
        }
        Ok(())
    }
}
impl fmt::Display for Import<'_> {
    /// Render the import as source text (e.g., `import os` or
    /// `import numpy as np`). This text is what the autofix inserts.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "import {}", self.name.name)?;
        if let Some(as_name) = self.name.as_name {
            write!(f, " as {as_name}")?;
        }
        Ok(())
    }
}
impl fmt::Display for AnyImport<'_> {
    /// Delegate rendering to the underlying import variant.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            AnyImport::Import(import) => write!(f, "{import}"),
            AnyImport::ImportFrom(import_from) => write!(f, "{import_from}"),
        }
    }
}
/// Return `true` if any import in the `Block` satisfies the
/// `required_import`: same style, same module/level (for from-imports), and an
/// alias with a matching name and `as` name.
fn contains(block: &Block, required_import: &AnyImport) -> bool {
    block
        .imports
        .iter()
        .any(|import| match (required_import, &import.node) {
            // `import name [as alias]` must match one of the statement's aliases.
            (AnyImport::Import(required), StmtKind::Import { names }) => {
                names.iter().any(|alias| {
                    alias.node.name == required.name.name
                        && alias.node.asname.as_deref() == required.name.as_name
                })
            }
            // `from module import name [as alias]` must additionally match the
            // module and the relative-import level.
            (
                AnyImport::ImportFrom(required),
                StmtKind::ImportFrom {
                    module,
                    names,
                    level,
                },
            ) => {
                module.as_deref() == required.module
                    && level.as_ref() == required.level
                    && names.iter().any(|alias| {
                        alias.node.name == required.name.name
                            && alias.node.asname.as_deref() == required.name.as_name
                    })
            }
            // Style mismatch (e.g., required a from-import but found a plain
            // import): not a match.
            _ => false,
        })
}
/// Create a `Diagnostic` (with an optional insertion fix) for a single missing
/// `required_import`, or `None` when the import is already present in a
/// top-level block or the file is semantically empty.
fn add_required_import(
    required_import: &AnyImport,
    blocks: &[&Block],
    python_ast: &Suite,
    locator: &SourceCodeLocator,
    settings: &Settings,
    autofix: flags::Autofix,
) -> Option<Diagnostic> {
    // If the import is already present in a top-level block, don't add it.
    if blocks
        .iter()
        .filter(|block| !block.nested)
        .any(|block| contains(block, required_import))
    {
        return None;
    }

    // Don't add imports to semantically-empty files.
    if python_ast.iter().all(is_docstring_stmt) {
        return None;
    }

    // Always insert the diagnostic at top-of-file. The rendered import text is
    // reused both in the violation message and as the fix content.
    let required_import = required_import.to_string();
    let mut diagnostic = Diagnostic::new(
        violations::MissingRequiredImport(required_import.clone()),
        Range::new(Location::default(), Location::default()),
    );
    if matches!(autofix, flags::Autofix::Enabled) && settings.fixable.contains(&RuleCode::I002) {
        // Determine the location at which the import should be inserted.
        let splice = helpers::find_splice_location(python_ast, locator);

        // Generate the edit.
        let mut contents = String::with_capacity(required_import.len() + 1);
        // If we're inserting beyond the start of the file, we add
        // a newline _before_, since the splice represents the _end_ of the last
        // irrelevant token (e.g., the end of a comment or the end of
        // docstring). This ensures that we properly handle awkward cases like
        // docstrings that are followed by semicolons.
        if splice > Location::default() {
            contents.push('\n');
        }
        contents.push_str(&required_import);
        // If we're inserting at the start of the file, add a trailing newline instead.
        if splice == Location::default() {
            contents.push('\n');
        }

        // Construct the fix.
        diagnostic.amend(Fix::insertion(contents, splice));
    }
    Some(diagnostic)
}
/// I002
pub fn add_required_imports(
blocks: &[&Block],
python_ast: &Suite,
locator: &SourceCodeLocator,
settings: &Settings,
autofix: flags::Autofix,
) -> Vec<Diagnostic> {
settings
.isort
.required_imports
.iter()
.flat_map(|required_import| {
let Ok(body) = rustpython_parser::parser::parse_program(required_import, "<filename>") else {
error!("Failed to parse required import: `{}`", required_import);
return vec![];
};
if body.is_empty() || body.len() > 1 {
error!("Expected require import to contain a single statement: `{}`", required_import);
return vec![];
}
match &body[0].node {
StmtKind::ImportFrom { module, names, level } => {
names.iter().filter_map(|name| {
add_required_import(
&AnyImport::ImportFrom(ImportFrom {
module: module.as_ref().map(String::as_str),
name: Alias {
name: name.node.name.as_str(),
as_name: name.node.asname.as_deref(),
},
level: level.as_ref(),
}),
blocks,
python_ast,
locator,
settings,
autofix,
)
}).collect()
}
StmtKind::Import { names } => {
names.iter().filter_map(|name| {
add_required_import(
&AnyImport::Import(Import {
name: Alias {
name: name.node.name.as_str(),
as_name: name.node.asname.as_deref(),
},
}),
blocks,
python_ast,
locator,
settings,
autofix,
)
}).collect()
}
_ => {
error!("Expected required import to be in import-from style: `{}`", required_import);
vec![]
}
}
})
.collect()
}

5
src/isort/rules/mod.rs Normal file
View File

@@ -0,0 +1,5 @@
pub use add_required_imports::add_required_imports;
pub use organize_imports::organize_imports;
pub mod add_required_imports;
pub mod organize_imports;

View File

@@ -27,7 +27,7 @@ fn extract_indentation_range(body: &[&Stmt]) -> Range {
}
/// I001
pub fn check_imports(
pub fn organize_imports(
block: &Block,
locator: &SourceCodeLocator,
settings: &Settings,

View File

@@ -129,36 +129,47 @@ pub struct Options {
/// A list of modules to consider standard-library, in addition to those
/// known to Ruff in advance.
pub extra_standard_library: Option<Vec<String>>,
#[option(
default = r#"[]"#,
value_type = "Vec<String>",
example = r#"
add-import = ["from __future__ import annotations"]
"#
)]
/// Add the specified import line to all files.
pub required_imports: Option<Vec<String>>,
}
#[derive(Debug, Hash)]
#[allow(clippy::struct_excessive_bools)]
pub struct Settings {
pub required_imports: BTreeSet<String>,
pub combine_as_imports: bool,
pub force_wrap_aliases: bool,
pub split_on_trailing_comma: bool,
pub extra_standard_library: BTreeSet<String>,
pub force_single_line: bool,
pub order_by_type: bool,
pub force_sort_within_sections: bool,
pub single_line_exclusions: BTreeSet<String>,
pub force_wrap_aliases: bool,
pub known_first_party: BTreeSet<String>,
pub known_third_party: BTreeSet<String>,
pub extra_standard_library: BTreeSet<String>,
pub order_by_type: bool,
pub single_line_exclusions: BTreeSet<String>,
pub split_on_trailing_comma: bool,
}
impl Default for Settings {
fn default() -> Self {
Self {
required_imports: BTreeSet::new(),
combine_as_imports: false,
force_wrap_aliases: false,
split_on_trailing_comma: true,
extra_standard_library: BTreeSet::new(),
force_single_line: false,
order_by_type: true,
force_sort_within_sections: false,
single_line_exclusions: BTreeSet::new(),
force_wrap_aliases: false,
known_first_party: BTreeSet::new(),
known_third_party: BTreeSet::new(),
extra_standard_library: BTreeSet::new(),
order_by_type: true,
single_line_exclusions: BTreeSet::new(),
split_on_trailing_comma: true,
}
}
}
@@ -166,20 +177,21 @@ impl Default for Settings {
impl From<Options> for Settings {
fn from(options: Options) -> Self {
Self {
required_imports: BTreeSet::from_iter(options.required_imports.unwrap_or_default()),
combine_as_imports: options.combine_as_imports.unwrap_or(false),
force_wrap_aliases: options.force_wrap_aliases.unwrap_or(false),
split_on_trailing_comma: options.split_on_trailing_comma.unwrap_or(true),
force_single_line: options.force_single_line.unwrap_or(false),
order_by_type: options.order_by_type.unwrap_or(true),
force_sort_within_sections: options.force_sort_within_sections.unwrap_or(false),
single_line_exclusions: BTreeSet::from_iter(
options.single_line_exclusions.unwrap_or_default(),
),
known_first_party: BTreeSet::from_iter(options.known_first_party.unwrap_or_default()),
known_third_party: BTreeSet::from_iter(options.known_third_party.unwrap_or_default()),
extra_standard_library: BTreeSet::from_iter(
options.extra_standard_library.unwrap_or_default(),
),
force_single_line: options.force_single_line.unwrap_or(false),
force_sort_within_sections: options.force_sort_within_sections.unwrap_or(false),
force_wrap_aliases: options.force_wrap_aliases.unwrap_or(false),
known_first_party: BTreeSet::from_iter(options.known_first_party.unwrap_or_default()),
known_third_party: BTreeSet::from_iter(options.known_third_party.unwrap_or_default()),
order_by_type: options.order_by_type.unwrap_or(true),
single_line_exclusions: BTreeSet::from_iter(
options.single_line_exclusions.unwrap_or_default(),
),
split_on_trailing_comma: options.split_on_trailing_comma.unwrap_or(true),
}
}
}
@@ -187,16 +199,17 @@ impl From<Options> for Settings {
impl From<Settings> for Options {
fn from(settings: Settings) -> Self {
Self {
required_imports: Some(settings.required_imports.into_iter().collect()),
combine_as_imports: Some(settings.combine_as_imports),
force_wrap_aliases: Some(settings.force_wrap_aliases),
split_on_trailing_comma: Some(settings.split_on_trailing_comma),
extra_standard_library: Some(settings.extra_standard_library.into_iter().collect()),
force_single_line: Some(settings.force_single_line),
order_by_type: Some(settings.order_by_type),
force_sort_within_sections: Some(settings.force_sort_within_sections),
single_line_exclusions: Some(settings.single_line_exclusions.into_iter().collect()),
force_wrap_aliases: Some(settings.force_wrap_aliases),
known_first_party: Some(settings.known_first_party.into_iter().collect()),
known_third_party: Some(settings.known_third_party.into_iter().collect()),
extra_standard_library: Some(settings.extra_standard_library.into_iter().collect()),
order_by_type: Some(settings.order_by_type),
single_line_exclusions: Some(settings.single_line_exclusions.into_iter().collect()),
split_on_trailing_comma: Some(settings.split_on_trailing_comma),
}
}
}

View File

@@ -0,0 +1,39 @@
---
source: src/isort/mod.rs
expression: diagnostics
---
- kind:
MissingRequiredImport: from __future__ import annotations
location:
row: 1
column: 0
end_location:
row: 1
column: 0
fix:
content: "\nfrom __future__ import annotations"
location:
row: 1
column: 19
end_location:
row: 1
column: 19
parent: ~
- kind:
MissingRequiredImport: from __future__ import generator_stop
location:
row: 1
column: 0
end_location:
row: 1
column: 0
fix:
content: "\nfrom __future__ import generator_stop"
location:
row: 1
column: 19
end_location:
row: 1
column: 19
parent: ~

View File

@@ -0,0 +1,6 @@
---
source: src/isort/mod.rs
expression: diagnostics
---
[]

View File

@@ -0,0 +1,6 @@
---
source: src/isort/mod.rs
expression: diagnostics
---
[]

View File

@@ -0,0 +1,22 @@
---
source: src/isort/mod.rs
expression: diagnostics
---
- kind:
MissingRequiredImport: from __future__ import annotations
location:
row: 1
column: 0
end_location:
row: 1
column: 0
fix:
content: "\nfrom __future__ import annotations"
location:
row: 1
column: 19
end_location:
row: 1
column: 19
parent: ~

View File

@@ -0,0 +1,6 @@
---
source: src/isort/mod.rs
expression: diagnostics
---
[]

View File

@@ -0,0 +1,6 @@
---
source: src/isort/mod.rs
expression: diagnostics
---
[]

View File

@@ -0,0 +1,39 @@
---
source: src/isort/mod.rs
expression: diagnostics
---
- kind:
MissingRequiredImport: from __future__ import annotations
location:
row: 1
column: 0
end_location:
row: 1
column: 0
fix:
content: "\nfrom __future__ import annotations"
location:
row: 1
column: 19
end_location:
row: 1
column: 19
parent: ~
- kind:
MissingRequiredImport: from __future__ import generator_stop
location:
row: 1
column: 0
end_location:
row: 1
column: 0
fix:
content: "\nfrom __future__ import generator_stop"
location:
row: 1
column: 19
end_location:
row: 1
column: 19
parent: ~

View File

@@ -0,0 +1,6 @@
---
source: src/isort/mod.rs
expression: diagnostics
---
[]

View File

@@ -0,0 +1,6 @@
---
source: src/isort/mod.rs
expression: diagnostics
---
[]

View File

@@ -0,0 +1,22 @@
---
source: src/isort/mod.rs
expression: diagnostics
---
- kind:
MissingRequiredImport: import os
location:
row: 1
column: 0
end_location:
row: 1
column: 0
fix:
content: "\nimport os"
location:
row: 1
column: 19
end_location:
row: 1
column: 19
parent: ~

View File

@@ -0,0 +1,6 @@
---
source: src/isort/mod.rs
expression: diagnostics
---
[]

View File

@@ -0,0 +1,6 @@
---
source: src/isort/mod.rs
expression: diagnostics
---
[]

View File

@@ -20,6 +20,7 @@ pub enum Trailer {
#[derive(Debug, Default)]
pub struct Block<'a> {
pub nested: bool,
pub imports: Vec<&'a Stmt>,
pub trailer: Option<Trailer>,
}
@@ -52,6 +53,7 @@ impl<'a> ImportTracker<'a> {
fn track_import(&mut self, stmt: &'a Stmt) {
let index = self.blocks.len() - 1;
self.blocks[index].imports.push(stmt);
self.blocks[index].nested = self.nested;
}
fn trailer_for(&self, stmt: &'a Stmt) -> Option<Trailer> {
@@ -105,8 +107,11 @@ impl<'a> ImportTracker<'a> {
}
}
pub fn into_iter(self) -> impl IntoIterator<Item = Block<'a>> {
self.blocks.into_iter()
pub fn iter<'b>(&'a self) -> impl Iterator<Item = &'b Block<'a>>
where
'a: 'b,
{
self.blocks.iter()
}
}

View File

@@ -24,7 +24,7 @@ const VERSION: &str = env!("CARGO_PKG_VERSION");
#[wasm_bindgen(typescript_custom_section)]
const TYPES: &'static str = r#"
export interface Check {
export interface Diagnostic {
code: string;
message: string;
location: {
@@ -112,6 +112,7 @@ pub fn defaultSettings() -> Result<JsValue, JsValue> {
show_source: None,
src: None,
unfixable: None,
typing_modules: None,
task_tags: None,
update_check: None,
// Use default options for all plugins.

View File

@@ -84,10 +84,11 @@ pub(crate) fn check_path(
.enabled
.iter()
.any(|rule_code| matches!(rule_code.lint_source(), LintSource::AST));
let use_imports = settings
.enabled
.iter()
.any(|rule_code| matches!(rule_code.lint_source(), LintSource::Imports));
let use_imports = !directives.isort.skip_file
&& settings
.enabled
.iter()
.any(|rule_code| matches!(rule_code.lint_source(), LintSource::Imports));
if use_ast || use_imports {
match rustpython_helpers::parse_program_tokens(tokens, "<filename>") {
Ok(python_ast) => {

View File

@@ -169,7 +169,7 @@ fn add_noqa_inner(
count += 1;
}
Directive::Codes(_, start, _, existing) => {
// Reconstruct the line based on the preserved check codes.
// Reconstruct the line based on the preserved rule codes.
// This enables us to tally the number of edits.
let mut formatted = String::new();

View File

@@ -16,7 +16,7 @@ pub struct Options {
ignore-overlong-task-comments = true
"#
)]
/// Whether or not line-length checks (`E501`) should be triggered for
/// Whether or not line-length violations (`E501`) should be triggered for
/// comments starting with `task-tags` (by default: ["TODO", "FIXME",
/// and "XXX"]).
pub ignore_overlong_task_comments: Option<bool>,

View File

@@ -1,7 +1,6 @@
//! Implements helper functions for using vendored/format.rs
use std::convert::TryFrom;
use rustc_hash::FxHashSet;
use rustpython_common::format::{
FieldName, FieldType, FormatParseError, FormatPart, FormatString, FromTemplate,
};
@@ -21,10 +20,12 @@ pub(crate) fn error_to_string(err: &FormatParseError) -> String {
.to_string()
}
#[derive(Debug)]
pub(crate) struct FormatSummary {
pub autos: FxHashSet<usize>,
pub indexes: FxHashSet<usize>,
pub keywords: FxHashSet<String>,
pub autos: Vec<usize>,
pub indexes: Vec<usize>,
pub keywords: Vec<String>,
pub has_nested_parts: bool,
}
impl TryFrom<&str> for FormatSummary {
@@ -33,9 +34,10 @@ impl TryFrom<&str> for FormatSummary {
fn try_from(literal: &str) -> Result<Self, Self::Error> {
let format_string = FormatString::from_str(literal)?;
let mut autos = FxHashSet::default();
let mut indexes = FxHashSet::default();
let mut keywords = FxHashSet::default();
let mut autos = Vec::new();
let mut indexes = Vec::new();
let mut keywords = Vec::new();
let mut has_nested_parts = false;
for format_part in format_string.format_parts {
let FormatPart::Field {
@@ -47,9 +49,9 @@ impl TryFrom<&str> for FormatSummary {
};
let parsed = FieldName::parse(&field_name)?;
match parsed.field_type {
FieldType::Auto => autos.insert(autos.len()),
FieldType::Index(i) => indexes.insert(i),
FieldType::Keyword(k) => keywords.insert(k),
FieldType::Auto => autos.push(autos.len()),
FieldType::Index(i) => indexes.push(i),
FieldType::Keyword(k) => keywords.push(k),
};
let nested = FormatString::from_str(&format_spec)?;
@@ -59,10 +61,11 @@ impl TryFrom<&str> for FormatSummary {
};
let parsed = FieldName::parse(&field_name)?;
match parsed.field_type {
FieldType::Auto => autos.insert(autos.len()),
FieldType::Index(i) => indexes.insert(i),
FieldType::Keyword(k) => keywords.insert(k),
FieldType::Auto => autos.push(autos.len()),
FieldType::Index(i) => indexes.push(i),
FieldType::Keyword(k) => keywords.push(k),
};
has_nested_parts = true;
}
}
@@ -70,6 +73,7 @@ impl TryFrom<&str> for FormatSummary {
autos,
indexes,
keywords,
has_nested_parts,
})
}
}
@@ -82,9 +86,9 @@ mod tests {
fn test_format_summary() {
let literal = "foo{foo}a{}b{2}c{2}d{1}{}{}e{bar}{foo}f{spam}";
let expected_autos = [0usize, 1usize, 2usize].into_iter().collect();
let expected_indexes = [1usize, 2usize].into_iter().collect();
let expected_keywords = ["foo", "bar", "spam"]
let expected_autos = [0usize, 1usize, 2usize].to_vec();
let expected_indexes = [2usize, 2usize, 1usize].to_vec();
let expected_keywords: Vec<_> = ["foo", "bar", "foo", "spam"]
.into_iter()
.map(String::from)
.collect();
@@ -94,15 +98,16 @@ mod tests {
assert_eq!(format_summary.autos, expected_autos);
assert_eq!(format_summary.indexes, expected_indexes);
assert_eq!(format_summary.keywords, expected_keywords);
assert!(!format_summary.has_nested_parts);
}
#[test]
fn test_format_summary_nested() {
let literal = "foo{foo}a{:{}{}}b{2:{3}{4}}c{2}d{1}{}e{bar:{spam}{eggs}}";
let expected_autos = [0usize, 1usize, 2usize, 3usize].into_iter().collect();
let expected_indexes = [1usize, 2usize, 3usize, 4usize].into_iter().collect();
let expected_keywords = ["foo", "bar", "spam", "eggs"]
let expected_autos = [0usize, 1usize, 2usize, 3usize].to_vec();
let expected_indexes = [2usize, 3usize, 4usize, 2usize, 1usize].to_vec();
let expected_keywords: Vec<_> = ["foo", "bar", "spam", "eggs"]
.into_iter()
.map(String::from)
.collect();
@@ -112,6 +117,7 @@ mod tests {
assert_eq!(format_summary.autos, expected_autos);
assert_eq!(format_summary.indexes, expected_indexes);
assert_eq!(format_summary.keywords, expected_keywords);
assert!(format_summary.has_nested_parts);
}
#[test]

View File

@@ -163,6 +163,29 @@ mod tests {
Ok(())
}
#[test]
fn default_typing_modules() -> Result<()> {
let diagnostics = test_path(
Path::new("./resources/test/fixtures/pyflakes/typing_modules.py"),
&settings::Settings::for_rules(vec![RuleCode::F821]),
)?;
insta::assert_yaml_snapshot!(diagnostics);
Ok(())
}
#[test]
fn extra_typing_modules() -> Result<()> {
let diagnostics = test_path(
Path::new("./resources/test/fixtures/pyflakes/typing_modules.py"),
&settings::Settings {
typing_modules: vec!["airflow.typing_compat".to_string()],
..settings::Settings::for_rules(vec![RuleCode::F821])
},
)?;
insta::assert_yaml_snapshot!(diagnostics);
Ok(())
}
#[test]
fn future_annotations() -> Result<()> {
let diagnostics = test_path(

View File

@@ -0,0 +1,15 @@
---
source: src/pyflakes/mod.rs
expression: diagnostics
---
- kind:
UndefinedName: db
location:
row: 6
column: 34
end_location:
row: 6
column: 38
fix: ~
parent: ~

View File

@@ -0,0 +1,15 @@
---
source: src/pyflakes/mod.rs
expression: diagnostics
---
- kind:
UndefinedName: Class
location:
row: 7
column: 13
end_location:
row: 7
column: 20
fix: ~
parent: ~

View File

@@ -5,7 +5,7 @@ use rustpython_ast::{Expr, ExprKind};
use crate::ast::helpers::{collect_call_paths, dealias_call_path, match_call_path};
// See: https://pypi.org/project/typing-extensions/
static TYPING_EXTENSIONS: Lazy<FxHashSet<&'static str>> = Lazy::new(|| {
pub static TYPING_EXTENSIONS: Lazy<FxHashSet<&'static str>> = Lazy::new(|| {
FxHashSet::from_iter([
"Annotated",
"Any",
@@ -61,159 +61,157 @@ static TYPING_EXTENSIONS: Lazy<FxHashSet<&'static str>> = Lazy::new(|| {
])
});
pub fn in_extensions(name: &str) -> bool {
TYPING_EXTENSIONS.contains(name)
}
// See: https://docs.python.org/3/library/typing.html
static SUBSCRIPTS: Lazy<FxHashMap<&'static str, Vec<&'static str>>> = Lazy::new(|| {
let mut subscripts: FxHashMap<&'static str, Vec<&'static str>> = FxHashMap::default();
for (module, name) in [
// builtins
("", "dict"),
("", "frozenset"),
("", "list"),
("", "set"),
("", "tuple"),
("", "type"),
// `collections`
("collections", "ChainMap"),
("collections", "Counter"),
("collections", "OrderedDict"),
("collections", "defaultdict"),
("collections", "deque"),
// `collections.abc`
("collections.abc", "AsyncGenerator"),
("collections.abc", "AsyncIterable"),
("collections.abc", "AsyncIterator"),
("collections.abc", "Awaitable"),
("collections.abc", "ByteString"),
("collections.abc", "Callable"),
("collections.abc", "Collection"),
("collections.abc", "Container"),
("collections.abc", "Coroutine"),
("collections.abc", "Generator"),
("collections.abc", "ItemsView"),
("collections.abc", "Iterable"),
("collections.abc", "Iterator"),
("collections.abc", "KeysView"),
("collections.abc", "Mapping"),
("collections.abc", "MappingView"),
("collections.abc", "MutableMapping"),
("collections.abc", "MutableSequence"),
("collections.abc", "MutableSet"),
("collections.abc", "Reversible"),
("collections.abc", "Sequence"),
("collections.abc", "Set"),
("collections.abc", "ValuesView"),
// `contextlib`
("contextlib", "AbstractAsyncContextManager"),
("contextlib", "AbstractContextManager"),
// `re`
("re", "Match"),
("re", "Pattern"),
// `typing`
("typing", "AbstractSet"),
("typing", "AsyncContextManager"),
("typing", "AsyncGenerator"),
("typing", "AsyncIterator"),
("typing", "Awaitable"),
("typing", "BinaryIO"),
("typing", "ByteString"),
("typing", "Callable"),
("typing", "ChainMap"),
("typing", "ClassVar"),
("typing", "Collection"),
("typing", "Concatenate"),
("typing", "Container"),
("typing", "ContextManager"),
("typing", "Coroutine"),
("typing", "Counter"),
("typing", "DefaultDict"),
("typing", "Deque"),
("typing", "Dict"),
("typing", "Final"),
("typing", "FrozenSet"),
("typing", "Generator"),
("typing", "Generic"),
("typing", "IO"),
("typing", "ItemsView"),
("typing", "Iterable"),
("typing", "Iterator"),
("typing", "KeysView"),
("typing", "List"),
("typing", "Mapping"),
("typing", "Match"),
("typing", "MutableMapping"),
("typing", "MutableSequence"),
("typing", "MutableSet"),
("typing", "Optional"),
("typing", "OrderedDict"),
("typing", "Pattern"),
("typing", "Reversible"),
("typing", "Sequence"),
("typing", "Set"),
("typing", "TextIO"),
("typing", "Tuple"),
("typing", "Type"),
("typing", "TypeGuard"),
("typing", "Union"),
("typing", "Unpack"),
("typing", "ValuesView"),
// `typing.io`
("typing.io", "BinaryIO"),
("typing.io", "IO"),
("typing.io", "TextIO"),
// `typing.re`
("typing.re", "Match"),
("typing.re", "Pattern"),
// `typing_extensions`
("typing_extensions", "AsyncContextManager"),
("typing_extensions", "AsyncGenerator"),
("typing_extensions", "AsyncIterable"),
("typing_extensions", "AsyncIterator"),
("typing_extensions", "Awaitable"),
("typing_extensions", "ChainMap"),
("typing_extensions", "ClassVar"),
("typing_extensions", "Concatenate"),
("typing_extensions", "ContextManager"),
("typing_extensions", "Coroutine"),
("typing_extensions", "Counter"),
("typing_extensions", "DefaultDict"),
("typing_extensions", "Deque"),
("typing_extensions", "Type"),
// `weakref`
("weakref", "WeakKeyDictionary"),
("weakref", "WeakSet"),
("weakref", "WeakValueDictionary"),
] {
subscripts.entry(name).or_default().push(module);
}
subscripts
});
// See: https://docs.python.org/3/library/typing.html
const SUBSCRIPTS: &[(&str, &str)] = &[
// builtins
("", "dict"),
("", "frozenset"),
("", "list"),
("", "set"),
("", "tuple"),
("", "type"),
// `collections`
("collections", "ChainMap"),
("collections", "Counter"),
("collections", "OrderedDict"),
("collections", "defaultdict"),
("collections", "deque"),
// `collections.abc`
("collections.abc", "AsyncGenerator"),
("collections.abc", "AsyncIterable"),
("collections.abc", "AsyncIterator"),
("collections.abc", "Awaitable"),
("collections.abc", "ByteString"),
("collections.abc", "Callable"),
("collections.abc", "Collection"),
("collections.abc", "Container"),
("collections.abc", "Coroutine"),
("collections.abc", "Generator"),
("collections.abc", "ItemsView"),
("collections.abc", "Iterable"),
("collections.abc", "Iterator"),
("collections.abc", "KeysView"),
("collections.abc", "Mapping"),
("collections.abc", "MappingView"),
("collections.abc", "MutableMapping"),
("collections.abc", "MutableSequence"),
("collections.abc", "MutableSet"),
("collections.abc", "Reversible"),
("collections.abc", "Sequence"),
("collections.abc", "Set"),
("collections.abc", "ValuesView"),
// `contextlib`
("contextlib", "AbstractAsyncContextManager"),
("contextlib", "AbstractContextManager"),
// `re`
("re", "Match"),
("re", "Pattern"),
// `typing`
("typing", "AbstractSet"),
("typing", "AsyncContextManager"),
("typing", "AsyncGenerator"),
("typing", "AsyncIterator"),
("typing", "Awaitable"),
("typing", "BinaryIO"),
("typing", "ByteString"),
("typing", "Callable"),
("typing", "ChainMap"),
("typing", "ClassVar"),
("typing", "Collection"),
("typing", "Concatenate"),
("typing", "Container"),
("typing", "ContextManager"),
("typing", "Coroutine"),
("typing", "Counter"),
("typing", "DefaultDict"),
("typing", "Deque"),
("typing", "Dict"),
("typing", "Final"),
("typing", "FrozenSet"),
("typing", "Generator"),
("typing", "Generic"),
("typing", "IO"),
("typing", "ItemsView"),
("typing", "Iterable"),
("typing", "Iterator"),
("typing", "KeysView"),
("typing", "List"),
("typing", "Mapping"),
("typing", "Match"),
("typing", "MutableMapping"),
("typing", "MutableSequence"),
("typing", "MutableSet"),
("typing", "Optional"),
("typing", "OrderedDict"),
("typing", "Pattern"),
("typing", "Reversible"),
("typing", "Sequence"),
("typing", "Set"),
("typing", "TextIO"),
("typing", "Tuple"),
("typing", "Type"),
("typing", "TypeGuard"),
("typing", "Union"),
("typing", "Unpack"),
("typing", "ValuesView"),
// `typing.io`
("typing.io", "BinaryIO"),
("typing.io", "IO"),
("typing.io", "TextIO"),
// `typing.re`
("typing.re", "Match"),
("typing.re", "Pattern"),
// `typing_extensions`
("typing_extensions", "AsyncContextManager"),
("typing_extensions", "AsyncGenerator"),
("typing_extensions", "AsyncIterable"),
("typing_extensions", "AsyncIterator"),
("typing_extensions", "Awaitable"),
("typing_extensions", "ChainMap"),
("typing_extensions", "ClassVar"),
("typing_extensions", "Concatenate"),
("typing_extensions", "ContextManager"),
("typing_extensions", "Coroutine"),
("typing_extensions", "Counter"),
("typing_extensions", "DefaultDict"),
("typing_extensions", "Deque"),
("typing_extensions", "Type"),
// `weakref`
("weakref", "WeakKeyDictionary"),
("weakref", "WeakSet"),
("weakref", "WeakValueDictionary"),
];
// See: https://docs.python.org/3/library/typing.html
const PEP_583_SUBSCRIPTS: &[(&str, &str)] = &[
// `typing`
("typing", "Annotated"),
// `typing_extensions`
("typing_extensions", "Annotated"),
];
// See: https://peps.python.org/pep-0585/
const PEP_585_BUILTINS_ELIGIBLE: &[(&str, &str)] = &[
("typing", "Dict"),
("typing", "FrozenSet"),
("typing", "List"),
("typing", "Set"),
("typing", "Tuple"),
("typing", "Type"),
("typing_extensions", "Type"),
];
static PEP_593_SUBSCRIPTS: Lazy<FxHashMap<&'static str, Vec<&'static str>>> = Lazy::new(|| {
let mut subscripts: FxHashMap<&'static str, Vec<&'static str>> = FxHashMap::default();
for (module, name) in [
// `typing`
("typing", "Annotated"),
// `typing_extensions`
("typing_extensions", "Annotated"),
] {
subscripts.entry(name).or_default().push(module);
}
subscripts
});
pub enum SubscriptKind {
AnnotatedSubscript,
PEP593AnnotatedSubscript,
}
pub fn match_annotated_subscript<F>(
pub fn match_annotated_subscript<'a, F>(
expr: &Expr,
from_imports: &FxHashMap<&str, FxHashSet<&str>>,
import_aliases: &FxHashMap<&str, &str>,
typing_modules: impl Iterator<Item = &'a str>,
is_builtin: F,
) -> Option<SubscriptKind>
where
@@ -226,23 +224,49 @@ where
return None;
}
let call_path = dealias_call_path(collect_call_paths(expr), import_aliases);
if !call_path.is_empty() {
for (module, member) in SUBSCRIPTS {
if match_call_path(&call_path, module, member, from_imports)
&& (!module.is_empty() || is_builtin(member))
{
return Some(SubscriptKind::AnnotatedSubscript);
if let Some(member) = call_path.last() {
if let Some(modules) = SUBSCRIPTS.get(member) {
for module in modules {
if match_call_path(&call_path, module, member, from_imports)
&& (!module.is_empty() || is_builtin(member))
{
return Some(SubscriptKind::AnnotatedSubscript);
}
}
}
for (module, member) in PEP_583_SUBSCRIPTS {
if match_call_path(&call_path, module, member, from_imports) {
return Some(SubscriptKind::PEP593AnnotatedSubscript);
for module in typing_modules {
if match_call_path(&call_path, module, member, from_imports) {
return Some(SubscriptKind::AnnotatedSubscript);
}
}
} else if let Some(modules) = PEP_593_SUBSCRIPTS.get(member) {
for module in modules {
if match_call_path(&call_path, module, member, from_imports)
&& (!module.is_empty() || is_builtin(member))
{
return Some(SubscriptKind::PEP593AnnotatedSubscript);
}
}
for module in typing_modules {
if match_call_path(&call_path, module, member, from_imports) {
return Some(SubscriptKind::PEP593AnnotatedSubscript);
}
}
}
}
None
}
// See: https://peps.python.org/pep-0585/
const PEP_585_BUILTINS_ELIGIBLE: &[(&str, &str)] = &[
("typing", "Dict"),
("typing", "FrozenSet"),
("typing", "List"),
("typing", "Set"),
("typing", "Tuple"),
("typing", "Type"),
("typing_extensions", "Type"),
];
/// Returns `true` if `Expr` represents a reference to a typing object with a
/// PEP 585 built-in.
pub fn is_pep585_builtin(

View File

@@ -50,6 +50,8 @@ mod tests {
#[test_case(RuleCode::UP028, Path::new("UP028_0.py"); "UP028_0")]
#[test_case(RuleCode::UP028, Path::new("UP028_1.py"); "UP028_1")]
#[test_case(RuleCode::UP029, Path::new("UP029.py"); "UP029")]
#[test_case(RuleCode::UP030, Path::new("UP030_0.py"); "UP030_0")]
#[test_case(RuleCode::UP030, Path::new("UP030_1.py"); "UP030_1")]
fn rules(rule_code: RuleCode, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.as_ref(), path.to_string_lossy());
let diagnostics = test_path(

View File

@@ -0,0 +1,118 @@
use anyhow::{anyhow, bail, Result};
use libcst_native::{Arg, Codegen, CodegenState, Expression};
use once_cell::sync::Lazy;
use regex::Regex;
use rustpython_ast::Expr;
use crate::ast::types::Range;
use crate::autofix::Fix;
use crate::checkers::ast::Checker;
use crate::cst::matchers::{match_call, match_expression};
use crate::pyflakes::format::FormatSummary;
use crate::registry::Diagnostic;
use crate::violations;
// An opening curly brace, followed by any integer, followed by any text,
// followed by a closing brace.
static FORMAT_SPECIFIER: Lazy<Regex> =
Lazy::new(|| Regex::new(r"\{(?P<int>\d+)(?P<fmt>.*?)}").unwrap());
/// Returns a string without the format specifiers.
/// Ex. "Hello {0} {1}" -> "Hello {} {}"
fn remove_specifiers(raw_specifiers: &str) -> String {
FORMAT_SPECIFIER
.replace_all(raw_specifiers, "{$fmt}")
.to_string()
}
/// Return the corrected argument vector.
fn generate_arguments<'a>(
old_args: &[Arg<'a>],
correct_order: &'a [usize],
) -> Result<Vec<Arg<'a>>> {
let mut new_args: Vec<Arg> = Vec::with_capacity(old_args.len());
for (idx, given) in correct_order.iter().enumerate() {
// We need to keep the formatting in the same order but move the values.
let values = old_args
.get(*given)
.ok_or_else(|| anyhow!("Failed to extract argument at: {given}"))?;
let formatting = old_args
.get(idx)
.ok_or_else(|| anyhow!("Failed to extract argument at: {idx}"))?;
let new_arg = Arg {
value: values.value.clone(),
comma: formatting.comma.clone(),
equal: None,
keyword: None,
star: values.star,
whitespace_after_star: formatting.whitespace_after_star.clone(),
whitespace_after_arg: formatting.whitespace_after_arg.clone(),
};
new_args.push(new_arg);
}
Ok(new_args)
}
/// Returns the corrected function call.
fn generate_call(module_text: &str, correct_order: &[usize]) -> Result<String> {
let mut expression = match_expression(module_text)?;
let mut call = match_call(&mut expression)?;
// Fix the call arguments.
call.args = generate_arguments(&call.args, correct_order)?;
// Fix the string itself.
let Expression::Attribute(item) = &*call.func else {
panic!("Expected: Expression::Attribute")
};
let mut state = CodegenState::default();
item.codegen(&mut state);
let cleaned = remove_specifiers(&state.to_string());
call.func = Box::new(match_expression(&cleaned)?);
let mut state = CodegenState::default();
expression.codegen(&mut state);
if module_text == state.to_string() {
// Ex) `'{' '0}'.format(1)`
bail!("Failed to generate call expression for: {module_text}")
}
Ok(state.to_string())
}
/// UP030
pub(crate) fn format_literals(checker: &mut Checker, summary: &FormatSummary, expr: &Expr) {
// The format we expect is, e.g.: `"{0} {1}".format(...)`
if summary.has_nested_parts {
return;
}
if !summary.keywords.is_empty() {
return;
}
if !summary.autos.is_empty() {
return;
}
if !(0..summary.indexes.len()).all(|index| summary.indexes.contains(&index)) {
return;
}
let mut diagnostic = Diagnostic::new(violations::FormatLiterals, Range::from_located(expr));
if checker.patch(diagnostic.kind.code()) {
// Currently, the only issue we know of is in LibCST:
// https://github.com/Instagram/LibCST/issues/846
if let Ok(contents) = generate_call(
&checker
.locator
.slice_source_code_range(&Range::from_located(expr)),
&summary.indexes,
) {
diagnostic.amend(Fix::replacement(
contents,
expr.location,
expr.end_location.unwrap(),
));
};
}
checker.diagnostics.push(diagnostic);
}

View File

@@ -1,34 +1,35 @@
pub use convert_named_tuple_functional_to_class::convert_named_tuple_functional_to_class;
pub use convert_typed_dict_functional_to_class::convert_typed_dict_functional_to_class;
pub use datetime_utc_alias::datetime_utc_alias;
pub use deprecated_unittest_alias::deprecated_unittest_alias;
pub use native_literals::native_literals;
pub(crate) use convert_named_tuple_functional_to_class::convert_named_tuple_functional_to_class;
pub(crate) use convert_typed_dict_functional_to_class::convert_typed_dict_functional_to_class;
pub(crate) use datetime_utc_alias::datetime_utc_alias;
pub(crate) use deprecated_unittest_alias::deprecated_unittest_alias;
pub(crate) use format_literals::format_literals;
pub(crate) use native_literals::native_literals;
use once_cell::sync::Lazy;
pub use open_alias::open_alias;
pub use os_error_alias::os_error_alias;
pub use redundant_open_modes::redundant_open_modes;
pub(crate) use open_alias::open_alias;
pub(crate) use os_error_alias::os_error_alias;
pub(crate) use redundant_open_modes::redundant_open_modes;
use regex::Regex;
pub use remove_six_compat::remove_six_compat;
pub use replace_stdout_stderr::replace_stdout_stderr;
pub use replace_universal_newlines::replace_universal_newlines;
pub use rewrite_c_element_tree::replace_c_element_tree;
pub use rewrite_mock_import::{rewrite_mock_attribute, rewrite_mock_import};
pub use rewrite_unicode_literal::rewrite_unicode_literal;
pub use rewrite_yield_from::rewrite_yield_from;
pub(crate) use remove_six_compat::remove_six_compat;
pub(crate) use replace_stdout_stderr::replace_stdout_stderr;
pub(crate) use replace_universal_newlines::replace_universal_newlines;
pub(crate) use rewrite_c_element_tree::replace_c_element_tree;
pub(crate) use rewrite_mock_import::{rewrite_mock_attribute, rewrite_mock_import};
pub(crate) use rewrite_unicode_literal::rewrite_unicode_literal;
pub(crate) use rewrite_yield_from::rewrite_yield_from;
use rustpython_ast::Location;
use rustpython_parser::ast::{ArgData, Expr, ExprKind, Stmt, StmtKind};
pub use super_call_with_parameters::super_call_with_parameters;
pub use type_of_primitive::type_of_primitive;
pub use typing_text_str_alias::typing_text_str_alias;
pub use unnecessary_builtin_import::unnecessary_builtin_import;
pub use unnecessary_encode_utf8::unnecessary_encode_utf8;
pub use unnecessary_future_import::unnecessary_future_import;
pub use unnecessary_lru_cache_params::unnecessary_lru_cache_params;
pub use unpack_list_comprehension::unpack_list_comprehension;
pub use use_pep585_annotation::use_pep585_annotation;
pub use use_pep604_annotation::use_pep604_annotation;
pub use useless_metaclass_type::useless_metaclass_type;
pub use useless_object_inheritance::useless_object_inheritance;
pub(crate) use super_call_with_parameters::super_call_with_parameters;
pub(crate) use type_of_primitive::type_of_primitive;
pub(crate) use typing_text_str_alias::typing_text_str_alias;
pub(crate) use unnecessary_builtin_import::unnecessary_builtin_import;
pub(crate) use unnecessary_encode_utf8::unnecessary_encode_utf8;
pub(crate) use unnecessary_future_import::unnecessary_future_import;
pub(crate) use unnecessary_lru_cache_params::unnecessary_lru_cache_params;
pub(crate) use unpack_list_comprehension::unpack_list_comprehension;
pub(crate) use use_pep585_annotation::use_pep585_annotation;
pub(crate) use use_pep604_annotation::use_pep604_annotation;
pub(crate) use useless_metaclass_type::useless_metaclass_type;
pub(crate) use useless_object_inheritance::useless_object_inheritance;
use crate::ast::helpers::{self};
use crate::ast::types::{Range, Scope, ScopeKind};
@@ -40,6 +41,7 @@ mod convert_named_tuple_functional_to_class;
mod convert_typed_dict_functional_to_class;
mod datetime_utc_alias;
mod deprecated_unittest_alias;
mod format_literals;
mod native_literals;
mod open_alias;
mod os_error_alias;

View File

@@ -0,0 +1,188 @@
---
source: src/pyupgrade/mod.rs
expression: diagnostics
---
- kind:
FormatLiterals: ~
location:
row: 3
column: 0
end_location:
row: 3
column: 33
fix:
content: "\"{}\" \"{}\" \"{}\".format(1, 2, 3)"
location:
row: 3
column: 0
end_location:
row: 3
column: 33
parent: ~
- kind:
FormatLiterals: ~
location:
row: 5
column: 0
end_location:
row: 7
column: 1
fix:
content: "\"a {} complicated {} string with {} {}\".format(\n \"fourth\", \"second\", \"first\", \"third\"\n)"
location:
row: 5
column: 0
end_location:
row: 7
column: 1
parent: ~
- kind:
FormatLiterals: ~
location:
row: 9
column: 0
end_location:
row: 9
column: 15
fix:
content: "'{}'.format(1)"
location:
row: 9
column: 0
end_location:
row: 9
column: 15
parent: ~
- kind:
FormatLiterals: ~
location:
row: 11
column: 0
end_location:
row: 11
column: 18
fix:
content: "'{:x}'.format(30)"
location:
row: 11
column: 0
end_location:
row: 11
column: 18
parent: ~
- kind:
FormatLiterals: ~
location:
row: 13
column: 4
end_location:
row: 13
column: 19
fix:
content: "'{}'.format(1)"
location:
row: 13
column: 4
end_location:
row: 13
column: 19
parent: ~
- kind:
FormatLiterals: ~
location:
row: 15
column: 0
end_location:
row: 15
column: 29
fix:
content: "'''{}\\n{}\\n'''.format(1, 2)"
location:
row: 15
column: 0
end_location:
row: 15
column: 29
parent: ~
- kind:
FormatLiterals: ~
location:
row: 17
column: 4
end_location:
row: 18
column: 26
fix:
content: "\"foo {}\" \\\n \"bar {}\".format(1, 2)"
location:
row: 17
column: 4
end_location:
row: 18
column: 26
parent: ~
- kind:
FormatLiterals: ~
location:
row: 20
column: 0
end_location:
row: 20
column: 17
fix:
content: "(\"{}\").format(1)"
location:
row: 20
column: 0
end_location:
row: 20
column: 17
parent: ~
- kind:
FormatLiterals: ~
location:
row: 22
column: 0
end_location:
row: 22
column: 27
fix:
content: "\"\\N{snowman} {}\".format(1)"
location:
row: 22
column: 0
end_location:
row: 22
column: 27
parent: ~
- kind:
FormatLiterals: ~
location:
row: 24
column: 0
end_location:
row: 24
column: 18
fix: ~
parent: ~
- kind:
FormatLiterals: ~
location:
row: 29
column: 4
end_location:
row: 30
column: 25
fix: ~
parent: ~
- kind:
FormatLiterals: ~
location:
row: 34
column: 4
end_location:
row: 35
column: 25
fix: ~
parent: ~

View File

@@ -0,0 +1,6 @@
---
source: src/pyupgrade/mod.rs
expression: diagnostics
---
[]

View File

@@ -54,6 +54,14 @@ macro_rules! define_rule_mapping {
)+
}
}
pub fn origin(&self) -> RuleOrigin {
match self {
$(
RuleCode::$code => ruff_macros::origin_by_code!($code),
)+
}
}
}
impl DiagnosticKind {
@@ -295,6 +303,7 @@ define_rule_mapping!(
SIM109 => violations::CompareWithTuple,
SIM110 => violations::ConvertLoopToAny,
SIM111 => violations::ConvertLoopToAll,
SIM112 => violations::UseCapitalEnvironmentVariables,
SIM117 => violations::MultipleWithStatements,
SIM118 => violations::KeyInDict,
SIM201 => violations::NegateEqualOp,
@@ -337,6 +346,7 @@ define_rule_mapping!(
UP027 => violations::RewriteListComprehension,
UP028 => violations::RewriteYieldFrom,
UP029 => violations::UnnecessaryBuiltinImport,
UP030 => violations::FormatLiterals,
// pydocstyle
D100 => violations::PublicModule,
D101 => violations::PublicClass,
@@ -401,6 +411,7 @@ define_rule_mapping!(
N818 => violations::ErrorSuffixOnExceptionName,
// isort
I001 => violations::UnsortedImports,
I002 => violations::MissingRequiredImport,
// eradicate
ERA001 => violations::CommentedOutCode,
// flake8-bandit
@@ -416,6 +427,8 @@ define_rule_mapping!(
S324 => violations::HashlibInsecureHashFunction,
S501 => violations::RequestWithNoCertValidation,
S506 => violations::UnsafeYAMLLoad,
S508 => violations::SnmpInsecureVersion,
S509 => violations::SnmpWeakCryptography,
// flake8-boolean-trap
FBT001 => violations::BooleanPositionalArgInFunctionDefinition,
FBT002 => violations::BooleanDefaultValueInFunctionDefinition,
@@ -786,406 +799,10 @@ impl RuleCode {
| RuleCode::RUF002
| RuleCode::RUF003 => &LintSource::Tokens,
RuleCode::E902 => &LintSource::FileSystem,
RuleCode::I001 => &LintSource::Imports,
RuleCode::I001 | RuleCode::I002 => &LintSource::Imports,
_ => &LintSource::AST,
}
}
pub fn origin(&self) -> RuleOrigin {
#[allow(clippy::match_same_arms)]
match self {
// flake8-builtins
RuleCode::A001 => RuleOrigin::Flake8Builtins,
RuleCode::A002 => RuleOrigin::Flake8Builtins,
RuleCode::A003 => RuleOrigin::Flake8Builtins,
// flake8-annotations
RuleCode::ANN001 => RuleOrigin::Flake8Annotations,
RuleCode::ANN002 => RuleOrigin::Flake8Annotations,
RuleCode::ANN003 => RuleOrigin::Flake8Annotations,
RuleCode::ANN101 => RuleOrigin::Flake8Annotations,
RuleCode::ANN102 => RuleOrigin::Flake8Annotations,
RuleCode::ANN201 => RuleOrigin::Flake8Annotations,
RuleCode::ANN202 => RuleOrigin::Flake8Annotations,
RuleCode::ANN204 => RuleOrigin::Flake8Annotations,
RuleCode::ANN205 => RuleOrigin::Flake8Annotations,
RuleCode::ANN206 => RuleOrigin::Flake8Annotations,
RuleCode::ANN401 => RuleOrigin::Flake8Annotations,
// flake8-unused-arguments
RuleCode::ARG001 => RuleOrigin::Flake8UnusedArguments,
RuleCode::ARG002 => RuleOrigin::Flake8UnusedArguments,
RuleCode::ARG003 => RuleOrigin::Flake8UnusedArguments,
RuleCode::ARG004 => RuleOrigin::Flake8UnusedArguments,
RuleCode::ARG005 => RuleOrigin::Flake8UnusedArguments,
// flake8-bugbear
RuleCode::B002 => RuleOrigin::Flake8Bugbear,
RuleCode::B003 => RuleOrigin::Flake8Bugbear,
RuleCode::B004 => RuleOrigin::Flake8Bugbear,
RuleCode::B005 => RuleOrigin::Flake8Bugbear,
RuleCode::B006 => RuleOrigin::Flake8Bugbear,
RuleCode::B007 => RuleOrigin::Flake8Bugbear,
RuleCode::B008 => RuleOrigin::Flake8Bugbear,
RuleCode::B009 => RuleOrigin::Flake8Bugbear,
RuleCode::B010 => RuleOrigin::Flake8Bugbear,
RuleCode::B011 => RuleOrigin::Flake8Bugbear,
RuleCode::B012 => RuleOrigin::Flake8Bugbear,
RuleCode::B013 => RuleOrigin::Flake8Bugbear,
RuleCode::B014 => RuleOrigin::Flake8Bugbear,
RuleCode::B015 => RuleOrigin::Flake8Bugbear,
RuleCode::B016 => RuleOrigin::Flake8Bugbear,
RuleCode::B017 => RuleOrigin::Flake8Bugbear,
RuleCode::B018 => RuleOrigin::Flake8Bugbear,
RuleCode::B019 => RuleOrigin::Flake8Bugbear,
RuleCode::B020 => RuleOrigin::Flake8Bugbear,
RuleCode::B021 => RuleOrigin::Flake8Bugbear,
RuleCode::B022 => RuleOrigin::Flake8Bugbear,
RuleCode::B023 => RuleOrigin::Flake8Bugbear,
RuleCode::B024 => RuleOrigin::Flake8Bugbear,
RuleCode::B025 => RuleOrigin::Flake8Bugbear,
RuleCode::B026 => RuleOrigin::Flake8Bugbear,
RuleCode::B027 => RuleOrigin::Flake8Bugbear,
RuleCode::B904 => RuleOrigin::Flake8Bugbear,
RuleCode::B905 => RuleOrigin::Flake8Bugbear,
// flake8-blind-except
RuleCode::BLE001 => RuleOrigin::Flake8BlindExcept,
// flake8-comprehensions
RuleCode::C400 => RuleOrigin::Flake8Comprehensions,
RuleCode::C401 => RuleOrigin::Flake8Comprehensions,
RuleCode::C402 => RuleOrigin::Flake8Comprehensions,
RuleCode::C403 => RuleOrigin::Flake8Comprehensions,
RuleCode::C404 => RuleOrigin::Flake8Comprehensions,
RuleCode::C405 => RuleOrigin::Flake8Comprehensions,
RuleCode::C406 => RuleOrigin::Flake8Comprehensions,
RuleCode::C408 => RuleOrigin::Flake8Comprehensions,
RuleCode::C409 => RuleOrigin::Flake8Comprehensions,
RuleCode::C410 => RuleOrigin::Flake8Comprehensions,
RuleCode::C411 => RuleOrigin::Flake8Comprehensions,
RuleCode::C413 => RuleOrigin::Flake8Comprehensions,
RuleCode::C414 => RuleOrigin::Flake8Comprehensions,
RuleCode::C415 => RuleOrigin::Flake8Comprehensions,
RuleCode::C416 => RuleOrigin::Flake8Comprehensions,
RuleCode::C417 => RuleOrigin::Flake8Comprehensions,
// mccabe
RuleCode::C901 => RuleOrigin::McCabe,
// pydocstyle
RuleCode::D100 => RuleOrigin::Pydocstyle,
RuleCode::D101 => RuleOrigin::Pydocstyle,
RuleCode::D102 => RuleOrigin::Pydocstyle,
RuleCode::D103 => RuleOrigin::Pydocstyle,
RuleCode::D104 => RuleOrigin::Pydocstyle,
RuleCode::D105 => RuleOrigin::Pydocstyle,
RuleCode::D106 => RuleOrigin::Pydocstyle,
RuleCode::D107 => RuleOrigin::Pydocstyle,
RuleCode::D200 => RuleOrigin::Pydocstyle,
RuleCode::D201 => RuleOrigin::Pydocstyle,
RuleCode::D202 => RuleOrigin::Pydocstyle,
RuleCode::D203 => RuleOrigin::Pydocstyle,
RuleCode::D204 => RuleOrigin::Pydocstyle,
RuleCode::D205 => RuleOrigin::Pydocstyle,
RuleCode::D206 => RuleOrigin::Pydocstyle,
RuleCode::D207 => RuleOrigin::Pydocstyle,
RuleCode::D208 => RuleOrigin::Pydocstyle,
RuleCode::D209 => RuleOrigin::Pydocstyle,
RuleCode::D210 => RuleOrigin::Pydocstyle,
RuleCode::D211 => RuleOrigin::Pydocstyle,
RuleCode::D212 => RuleOrigin::Pydocstyle,
RuleCode::D213 => RuleOrigin::Pydocstyle,
RuleCode::D214 => RuleOrigin::Pydocstyle,
RuleCode::D215 => RuleOrigin::Pydocstyle,
RuleCode::D300 => RuleOrigin::Pydocstyle,
RuleCode::D301 => RuleOrigin::Pydocstyle,
RuleCode::D400 => RuleOrigin::Pydocstyle,
RuleCode::D402 => RuleOrigin::Pydocstyle,
RuleCode::D403 => RuleOrigin::Pydocstyle,
RuleCode::D404 => RuleOrigin::Pydocstyle,
RuleCode::D405 => RuleOrigin::Pydocstyle,
RuleCode::D406 => RuleOrigin::Pydocstyle,
RuleCode::D407 => RuleOrigin::Pydocstyle,
RuleCode::D408 => RuleOrigin::Pydocstyle,
RuleCode::D409 => RuleOrigin::Pydocstyle,
RuleCode::D410 => RuleOrigin::Pydocstyle,
RuleCode::D411 => RuleOrigin::Pydocstyle,
RuleCode::D412 => RuleOrigin::Pydocstyle,
RuleCode::D413 => RuleOrigin::Pydocstyle,
RuleCode::D414 => RuleOrigin::Pydocstyle,
RuleCode::D415 => RuleOrigin::Pydocstyle,
RuleCode::D416 => RuleOrigin::Pydocstyle,
RuleCode::D417 => RuleOrigin::Pydocstyle,
RuleCode::D418 => RuleOrigin::Pydocstyle,
RuleCode::D419 => RuleOrigin::Pydocstyle,
// flake8-datetimez
RuleCode::DTZ001 => RuleOrigin::Flake8Datetimez,
RuleCode::DTZ002 => RuleOrigin::Flake8Datetimez,
RuleCode::DTZ003 => RuleOrigin::Flake8Datetimez,
RuleCode::DTZ004 => RuleOrigin::Flake8Datetimez,
RuleCode::DTZ005 => RuleOrigin::Flake8Datetimez,
RuleCode::DTZ006 => RuleOrigin::Flake8Datetimez,
RuleCode::DTZ007 => RuleOrigin::Flake8Datetimez,
RuleCode::DTZ011 => RuleOrigin::Flake8Datetimez,
RuleCode::DTZ012 => RuleOrigin::Flake8Datetimez,
// pycodestyle (errors)
RuleCode::E401 => RuleOrigin::Pycodestyle,
RuleCode::E402 => RuleOrigin::Pycodestyle,
RuleCode::E501 => RuleOrigin::Pycodestyle,
RuleCode::E711 => RuleOrigin::Pycodestyle,
RuleCode::E712 => RuleOrigin::Pycodestyle,
RuleCode::E713 => RuleOrigin::Pycodestyle,
RuleCode::E714 => RuleOrigin::Pycodestyle,
RuleCode::E721 => RuleOrigin::Pycodestyle,
RuleCode::E722 => RuleOrigin::Pycodestyle,
RuleCode::E731 => RuleOrigin::Pycodestyle,
RuleCode::E741 => RuleOrigin::Pycodestyle,
RuleCode::E742 => RuleOrigin::Pycodestyle,
RuleCode::E743 => RuleOrigin::Pycodestyle,
RuleCode::E902 => RuleOrigin::Pycodestyle,
RuleCode::E999 => RuleOrigin::Pycodestyle,
// flake8-errmsg
RuleCode::EM101 => RuleOrigin::Flake8ErrMsg,
RuleCode::EM102 => RuleOrigin::Flake8ErrMsg,
RuleCode::EM103 => RuleOrigin::Flake8ErrMsg,
// eradicate
RuleCode::ERA001 => RuleOrigin::Eradicate,
// pyflakes
RuleCode::F401 => RuleOrigin::Pyflakes,
RuleCode::F402 => RuleOrigin::Pyflakes,
RuleCode::F403 => RuleOrigin::Pyflakes,
RuleCode::F404 => RuleOrigin::Pyflakes,
RuleCode::F405 => RuleOrigin::Pyflakes,
RuleCode::F406 => RuleOrigin::Pyflakes,
RuleCode::F407 => RuleOrigin::Pyflakes,
RuleCode::F501 => RuleOrigin::Pyflakes,
RuleCode::F502 => RuleOrigin::Pyflakes,
RuleCode::F503 => RuleOrigin::Pyflakes,
RuleCode::F504 => RuleOrigin::Pyflakes,
RuleCode::F505 => RuleOrigin::Pyflakes,
RuleCode::F506 => RuleOrigin::Pyflakes,
RuleCode::F507 => RuleOrigin::Pyflakes,
RuleCode::F508 => RuleOrigin::Pyflakes,
RuleCode::F509 => RuleOrigin::Pyflakes,
RuleCode::F521 => RuleOrigin::Pyflakes,
RuleCode::F522 => RuleOrigin::Pyflakes,
RuleCode::F523 => RuleOrigin::Pyflakes,
RuleCode::F524 => RuleOrigin::Pyflakes,
RuleCode::F525 => RuleOrigin::Pyflakes,
RuleCode::F541 => RuleOrigin::Pyflakes,
RuleCode::F601 => RuleOrigin::Pyflakes,
RuleCode::F602 => RuleOrigin::Pyflakes,
RuleCode::F621 => RuleOrigin::Pyflakes,
RuleCode::F622 => RuleOrigin::Pyflakes,
RuleCode::F631 => RuleOrigin::Pyflakes,
RuleCode::F632 => RuleOrigin::Pyflakes,
RuleCode::F633 => RuleOrigin::Pyflakes,
RuleCode::F634 => RuleOrigin::Pyflakes,
RuleCode::F701 => RuleOrigin::Pyflakes,
RuleCode::F702 => RuleOrigin::Pyflakes,
RuleCode::F704 => RuleOrigin::Pyflakes,
RuleCode::F706 => RuleOrigin::Pyflakes,
RuleCode::F707 => RuleOrigin::Pyflakes,
RuleCode::F722 => RuleOrigin::Pyflakes,
RuleCode::F811 => RuleOrigin::Pyflakes,
RuleCode::F821 => RuleOrigin::Pyflakes,
RuleCode::F822 => RuleOrigin::Pyflakes,
RuleCode::F823 => RuleOrigin::Pyflakes,
RuleCode::F841 => RuleOrigin::Pyflakes,
RuleCode::F842 => RuleOrigin::Pyflakes,
RuleCode::F901 => RuleOrigin::Pyflakes,
// flake8-boolean-trap
RuleCode::FBT001 => RuleOrigin::Flake8BooleanTrap,
RuleCode::FBT002 => RuleOrigin::Flake8BooleanTrap,
RuleCode::FBT003 => RuleOrigin::Flake8BooleanTrap,
// isort
RuleCode::I001 => RuleOrigin::Isort,
// flake8-import-conventions
RuleCode::ICN001 => RuleOrigin::Flake8ImportConventions,
// flake8-implicit-str-concat
RuleCode::ISC001 => RuleOrigin::Flake8ImplicitStrConcat,
RuleCode::ISC002 => RuleOrigin::Flake8ImplicitStrConcat,
RuleCode::ISC003 => RuleOrigin::Flake8ImplicitStrConcat,
// pep8-naming
RuleCode::N801 => RuleOrigin::PEP8Naming,
RuleCode::N802 => RuleOrigin::PEP8Naming,
RuleCode::N803 => RuleOrigin::PEP8Naming,
RuleCode::N804 => RuleOrigin::PEP8Naming,
RuleCode::N805 => RuleOrigin::PEP8Naming,
RuleCode::N806 => RuleOrigin::PEP8Naming,
RuleCode::N807 => RuleOrigin::PEP8Naming,
RuleCode::N811 => RuleOrigin::PEP8Naming,
RuleCode::N812 => RuleOrigin::PEP8Naming,
RuleCode::N813 => RuleOrigin::PEP8Naming,
RuleCode::N814 => RuleOrigin::PEP8Naming,
RuleCode::N815 => RuleOrigin::PEP8Naming,
RuleCode::N816 => RuleOrigin::PEP8Naming,
RuleCode::N817 => RuleOrigin::PEP8Naming,
RuleCode::N818 => RuleOrigin::PEP8Naming,
// pandas-vet
RuleCode::PD002 => RuleOrigin::PandasVet,
RuleCode::PD003 => RuleOrigin::PandasVet,
RuleCode::PD004 => RuleOrigin::PandasVet,
RuleCode::PD007 => RuleOrigin::PandasVet,
RuleCode::PD008 => RuleOrigin::PandasVet,
RuleCode::PD009 => RuleOrigin::PandasVet,
RuleCode::PD010 => RuleOrigin::PandasVet,
RuleCode::PD011 => RuleOrigin::PandasVet,
RuleCode::PD012 => RuleOrigin::PandasVet,
RuleCode::PD013 => RuleOrigin::PandasVet,
RuleCode::PD015 => RuleOrigin::PandasVet,
RuleCode::PD901 => RuleOrigin::PandasVet,
// pygrep-hooks
RuleCode::PGH001 => RuleOrigin::PygrepHooks,
RuleCode::PGH002 => RuleOrigin::PygrepHooks,
RuleCode::PGH003 => RuleOrigin::PygrepHooks,
RuleCode::PGH004 => RuleOrigin::PygrepHooks,
// pylint
RuleCode::PLC0414 => RuleOrigin::Pylint,
RuleCode::PLC2201 => RuleOrigin::Pylint,
RuleCode::PLC3002 => RuleOrigin::Pylint,
RuleCode::PLE0117 => RuleOrigin::Pylint,
RuleCode::PLE0118 => RuleOrigin::Pylint,
RuleCode::PLE1142 => RuleOrigin::Pylint,
RuleCode::PLR0206 => RuleOrigin::Pylint,
RuleCode::PLR0402 => RuleOrigin::Pylint,
RuleCode::PLR1701 => RuleOrigin::Pylint,
RuleCode::PLR1722 => RuleOrigin::Pylint,
RuleCode::PLW0120 => RuleOrigin::Pylint,
RuleCode::PLW0602 => RuleOrigin::Pylint,
// flake8-pytest-style
RuleCode::PT001 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT002 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT003 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT004 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT005 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT006 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT007 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT008 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT009 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT010 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT011 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT012 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT013 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT015 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT016 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT017 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT018 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT019 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT020 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT021 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT022 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT023 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT024 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT025 => RuleOrigin::Flake8PytestStyle,
RuleCode::PT026 => RuleOrigin::Flake8PytestStyle,
// flake8-quotes
RuleCode::Q000 => RuleOrigin::Flake8Quotes,
RuleCode::Q001 => RuleOrigin::Flake8Quotes,
RuleCode::Q002 => RuleOrigin::Flake8Quotes,
RuleCode::Q003 => RuleOrigin::Flake8Quotes,
// flake8-return
RuleCode::RET501 => RuleOrigin::Flake8Return,
RuleCode::RET502 => RuleOrigin::Flake8Return,
RuleCode::RET503 => RuleOrigin::Flake8Return,
RuleCode::RET504 => RuleOrigin::Flake8Return,
RuleCode::RET505 => RuleOrigin::Flake8Return,
RuleCode::RET506 => RuleOrigin::Flake8Return,
RuleCode::RET507 => RuleOrigin::Flake8Return,
RuleCode::RET508 => RuleOrigin::Flake8Return,
// flake8-bandit
RuleCode::S101 => RuleOrigin::Flake8Bandit,
RuleCode::S102 => RuleOrigin::Flake8Bandit,
RuleCode::S103 => RuleOrigin::Flake8Bandit,
RuleCode::S104 => RuleOrigin::Flake8Bandit,
RuleCode::S105 => RuleOrigin::Flake8Bandit,
RuleCode::S106 => RuleOrigin::Flake8Bandit,
RuleCode::S107 => RuleOrigin::Flake8Bandit,
RuleCode::S108 => RuleOrigin::Flake8Bandit,
RuleCode::S113 => RuleOrigin::Flake8Bandit,
RuleCode::S324 => RuleOrigin::Flake8Bandit,
RuleCode::S501 => RuleOrigin::Flake8Bandit,
RuleCode::S506 => RuleOrigin::Flake8Bandit,
// flake8-simplify
RuleCode::SIM103 => RuleOrigin::Flake8Simplify,
RuleCode::SIM101 => RuleOrigin::Flake8Simplify,
RuleCode::SIM102 => RuleOrigin::Flake8Simplify,
RuleCode::SIM105 => RuleOrigin::Flake8Simplify,
RuleCode::SIM107 => RuleOrigin::Flake8Simplify,
RuleCode::SIM108 => RuleOrigin::Flake8Simplify,
RuleCode::SIM109 => RuleOrigin::Flake8Simplify,
RuleCode::SIM110 => RuleOrigin::Flake8Simplify,
RuleCode::SIM111 => RuleOrigin::Flake8Simplify,
RuleCode::SIM117 => RuleOrigin::Flake8Simplify,
RuleCode::SIM118 => RuleOrigin::Flake8Simplify,
RuleCode::SIM201 => RuleOrigin::Flake8Simplify,
RuleCode::SIM202 => RuleOrigin::Flake8Simplify,
RuleCode::SIM208 => RuleOrigin::Flake8Simplify,
RuleCode::SIM210 => RuleOrigin::Flake8Simplify,
RuleCode::SIM211 => RuleOrigin::Flake8Simplify,
RuleCode::SIM212 => RuleOrigin::Flake8Simplify,
RuleCode::SIM220 => RuleOrigin::Flake8Simplify,
RuleCode::SIM221 => RuleOrigin::Flake8Simplify,
RuleCode::SIM222 => RuleOrigin::Flake8Simplify,
RuleCode::SIM223 => RuleOrigin::Flake8Simplify,
RuleCode::SIM300 => RuleOrigin::Flake8Simplify,
// flake8-debugger
RuleCode::T100 => RuleOrigin::Flake8Debugger,
// flake8-print
RuleCode::T201 => RuleOrigin::Flake8Print,
RuleCode::T203 => RuleOrigin::Flake8Print,
// flake8-tidy-imports
RuleCode::TID251 => RuleOrigin::Flake8TidyImports,
RuleCode::TID252 => RuleOrigin::Flake8TidyImports,
// pyupgrade
RuleCode::UP001 => RuleOrigin::Pyupgrade,
RuleCode::UP003 => RuleOrigin::Pyupgrade,
RuleCode::UP004 => RuleOrigin::Pyupgrade,
RuleCode::UP005 => RuleOrigin::Pyupgrade,
RuleCode::UP006 => RuleOrigin::Pyupgrade,
RuleCode::UP007 => RuleOrigin::Pyupgrade,
RuleCode::UP008 => RuleOrigin::Pyupgrade,
RuleCode::UP009 => RuleOrigin::Pyupgrade,
RuleCode::UP010 => RuleOrigin::Pyupgrade,
RuleCode::UP011 => RuleOrigin::Pyupgrade,
RuleCode::UP012 => RuleOrigin::Pyupgrade,
RuleCode::UP013 => RuleOrigin::Pyupgrade,
RuleCode::UP014 => RuleOrigin::Pyupgrade,
RuleCode::UP015 => RuleOrigin::Pyupgrade,
RuleCode::UP016 => RuleOrigin::Pyupgrade,
RuleCode::UP017 => RuleOrigin::Pyupgrade,
RuleCode::UP018 => RuleOrigin::Pyupgrade,
RuleCode::UP019 => RuleOrigin::Pyupgrade,
RuleCode::UP020 => RuleOrigin::Pyupgrade,
RuleCode::UP021 => RuleOrigin::Pyupgrade,
RuleCode::UP022 => RuleOrigin::Pyupgrade,
RuleCode::UP023 => RuleOrigin::Pyupgrade,
RuleCode::UP024 => RuleOrigin::Pyupgrade,
RuleCode::UP025 => RuleOrigin::Pyupgrade,
RuleCode::UP026 => RuleOrigin::Pyupgrade,
RuleCode::UP027 => RuleOrigin::Pyupgrade,
RuleCode::UP028 => RuleOrigin::Pyupgrade,
RuleCode::UP029 => RuleOrigin::Pyupgrade,
// pycodestyle (warnings)
RuleCode::W292 => RuleOrigin::Pycodestyle,
RuleCode::W605 => RuleOrigin::Pycodestyle,
// flake8-2020
RuleCode::YTT101 => RuleOrigin::Flake82020,
RuleCode::YTT102 => RuleOrigin::Flake82020,
RuleCode::YTT103 => RuleOrigin::Flake82020,
RuleCode::YTT201 => RuleOrigin::Flake82020,
RuleCode::YTT202 => RuleOrigin::Flake82020,
RuleCode::YTT203 => RuleOrigin::Flake82020,
RuleCode::YTT204 => RuleOrigin::Flake82020,
RuleCode::YTT301 => RuleOrigin::Flake82020,
RuleCode::YTT302 => RuleOrigin::Flake82020,
RuleCode::YTT303 => RuleOrigin::Flake82020,
// flake8-pie
RuleCode::PIE790 => RuleOrigin::Flake8Pie,
RuleCode::PIE794 => RuleOrigin::Flake8Pie,
RuleCode::PIE807 => RuleOrigin::Flake8Pie,
// Ruff
RuleCode::RUF001 => RuleOrigin::Ruff,
RuleCode::RUF002 => RuleOrigin::Ruff,
RuleCode::RUF003 => RuleOrigin::Ruff,
RuleCode::RUF004 => RuleOrigin::Ruff,
RuleCode::RUF100 => RuleOrigin::Ruff,
}
}
}
impl DiagnosticKind {

Some files were not shown because too many files have changed in this diff Show More