Compare commits

..

1 Commits

Author SHA1 Message Date
Dhruv Manilawala
b9d133f718 Update ecosystem with notebook repositories 2023-12-14 09:21:16 -06:00
287 changed files with 3450 additions and 10380 deletions

View File

@@ -5,10 +5,6 @@ updates:
schedule:
interval: "weekly"
labels: ["internal"]
groups:
actions:
patterns:
- "*"
- package-ecosystem: "cargo"
directory: "/"

View File

@@ -95,9 +95,9 @@ jobs:
rustup target add wasm32-unknown-unknown
- uses: Swatinem/rust-cache@v2
- name: "Clippy"
run: cargo clippy --workspace --all-targets --all-features --locked -- -D warnings
run: cargo clippy --workspace --all-targets --all-features -- -D warnings
- name: "Clippy (wasm)"
run: cargo clippy -p ruff_wasm --target wasm32-unknown-unknown --all-features --locked -- -D warnings
run: cargo clippy -p ruff_wasm --target wasm32-unknown-unknown --all-features -- -D warnings
cargo-test-linux:
runs-on: ubuntu-latest
@@ -226,7 +226,7 @@ jobs:
name: ruff
path: target/debug
- uses: dawidd6/action-download-artifact@v3
- uses: dawidd6/action-download-artifact@v2
name: Download baseline Ruff binary
with:
name: ruff

View File

@@ -17,7 +17,7 @@ jobs:
comment:
runs-on: ubuntu-latest
steps:
- uses: dawidd6/action-download-artifact@v3
- uses: dawidd6/action-download-artifact@v2
name: Download pull request number
with:
name: pr-number
@@ -32,7 +32,7 @@ jobs:
echo "pr-number=$(<pr-number)" >> $GITHUB_OUTPUT
fi
- uses: dawidd6/action-download-artifact@v3
- uses: dawidd6/action-download-artifact@v2
name: "Download ecosystem results"
id: download-ecosystem-result
if: steps.pr-number.outputs.pr-number

View File

@@ -73,7 +73,7 @@ jobs:
uses: PyO3/maturin-action@v1
with:
target: x86_64
args: --release --locked --out dist
args: --release --out dist
- name: "Test wheel - x86_64"
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
@@ -112,7 +112,7 @@ jobs:
- name: "Build wheels - universal2"
uses: PyO3/maturin-action@v1
with:
args: --release --locked --target universal2-apple-darwin --out dist
args: --release --target universal2-apple-darwin --out dist
- name: "Test wheel - universal2"
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*universal2.whl --force-reinstall
@@ -161,7 +161,7 @@ jobs:
uses: PyO3/maturin-action@v1
with:
target: ${{ matrix.platform.target }}
args: --release --locked --out dist
args: --release --out dist
- name: "Test wheel"
if: ${{ !startsWith(matrix.platform.target, 'aarch64') }}
shell: bash
@@ -210,7 +210,7 @@ jobs:
with:
target: ${{ matrix.target }}
manylinux: auto
args: --release --locked --out dist
args: --release --out dist
- name: "Test wheel"
if: ${{ startsWith(matrix.target, 'x86_64') }}
run: |
@@ -269,7 +269,7 @@ jobs:
target: ${{ matrix.platform.target }}
manylinux: auto
docker-options: ${{ matrix.platform.maturin_docker_options }}
args: --release --locked --out dist
args: --release --out dist
- uses: uraimo/run-on-arch-action@v2
if: matrix.platform.arch != 'ppc64'
name: Test wheel
@@ -324,7 +324,7 @@ jobs:
with:
target: ${{ matrix.target }}
manylinux: musllinux_1_2
args: --release --locked --out dist
args: --release --out dist
- name: "Test wheel"
if: matrix.target == 'x86_64-unknown-linux-musl'
uses: addnab/docker-run-action@v3
@@ -379,7 +379,7 @@ jobs:
with:
target: ${{ matrix.platform.target }}
manylinux: musllinux_1_2
args: --release --locked --out dist
args: --release --out dist
docker-options: ${{ matrix.platform.maturin_docker_options }}
- uses: uraimo/run-on-arch-action@v2
name: Test wheel

View File

@@ -1,42 +1,5 @@
# Breaking Changes
## 0.1.9
### `site-packages` is now excluded by default ([#5513](https://github.com/astral-sh/ruff/pull/5513))
Ruff maintains a list of default exclusions, which now consists of the following patterns:
- `.bzr`
- `.direnv`
- `.eggs`
- `.git-rewrite`
- `.git`
- `.hg`
- `.ipynb_checkpoints`
- `.mypy_cache`
- `.nox`
- `.pants.d`
- `.pyenv`
- `.pytest_cache`
- `.pytype`
- `.ruff_cache`
- `.svn`
- `.tox`
- `.venv`
- `.vscode`
- `__pypackages__`
- `_build`
- `buck-out`
- `build`
- `dist`
- `node_modules`
- `site-packages`
- `venv`
Previously, the `site-packages` directory was not excluded by default. While `site-packages` tends
to be excluded anyway by virtue of the `.venv` exclusion, this may not be the case when using Ruff
from VS Code outside a virtual environment.
## 0.1.0
### The deprecated `format` setting has been removed

View File

@@ -1,118 +1,5 @@
# Changelog
## 0.1.9
### Breaking changes
- Add site-packages to default exclusions ([#9188](https://github.com/astral-sh/ruff/pull/9188))
### Preview features
- Fix: Avoid parenthesizing subscript targets and values ([#9209](https://github.com/astral-sh/ruff/pull/9209))
- \[`pylint`\] Implement `too-many-locals` (`PLR0914`) ([#9163](https://github.com/astral-sh/ruff/pull/9163))
- Implement `reimplemented_operator` (FURB118) ([#9171](https://github.com/astral-sh/ruff/pull/9171))
- Add a rule to detect string members in runtime-evaluated unions ([#9143](https://github.com/astral-sh/ruff/pull/9143))
- Implement `no_blank_line_before_class_docstring` preview style ([#9154](https://github.com/astral-sh/ruff/pull/9154))
### Rule changes
- `CONSTANT_CASE` variables are improperly flagged for yoda violation (`SIM300`) ([#9164](https://github.com/astral-sh/ruff/pull/9164))
- \[`flake8-pyi`\] Cover ParamSpecs and TypeVarTuples (`PYI018`) ([#9198](https://github.com/astral-sh/ruff/pull/9198))
- \[`flake8-bugbear`\] Add fix for `zip-without-explicit-strict` (`B905`) ([#9176](https://github.com/astral-sh/ruff/pull/9176))
- Add fix to automatically remove `print` and `pprint` statements (`T201`, `T203`) ([#9208](https://github.com/astral-sh/ruff/pull/9208))
- Prefer `Never` to `NoReturn` in auto-typing in Python >= 3.11 (`ANN201`) ([#9213](https://github.com/astral-sh/ruff/pull/9213))
### Formatter
- `can_omit_optional_parentheses`: Exit early for unparenthesized expressions ([#9125](https://github.com/astral-sh/ruff/pull/9125))
- Fix `dynamic` mode with doctests so that it doesn't exceed configured line width ([#9129](https://github.com/astral-sh/ruff/pull/9129))
- Fix `can_omit_optional_parentheses` for expressions with a right most fstring ([#9124](https://github.com/astral-sh/ruff/pull/9124))
- Add `target_version` to formatter options ([#9220](https://github.com/astral-sh/ruff/pull/9220))
### CLI
- Update `ruff format --check` to display message for already formatted files ([#9153](https://github.com/astral-sh/ruff/pull/9153))
### Bug fixes
- Reverse order of arguments for `operator.contains` ([#9192](https://github.com/astral-sh/ruff/pull/9192))
- Iterate over lambdas in deferred type annotations ([#9175](https://github.com/astral-sh/ruff/pull/9175))
- Fix panic in `D208` with multibyte indent ([#9147](https://github.com/astral-sh/ruff/pull/9147))
- Add support for `NoReturn` in auto-return-typing ([#9206](https://github.com/astral-sh/ruff/pull/9206))
- Allow removal of `typing` from `exempt-modules` ([#9214](https://github.com/astral-sh/ruff/pull/9214))
- Avoid `mutable-class-default` violations for Pydantic subclasses ([#9187](https://github.com/astral-sh/ruff/pull/9187))
- Fix dropped union expressions for piped non-types in `PYI055` autofix ([#9161](https://github.com/astral-sh/ruff/pull/9161))
- Enable annotation quoting for multi-line expressions ([#9142](https://github.com/astral-sh/ruff/pull/9142))
- Deduplicate edits when quoting annotations ([#9140](https://github.com/astral-sh/ruff/pull/9140))
- Prevent invalid utf8 indexing in cell magic detection ([#9146](https://github.com/astral-sh/ruff/pull/9146))
- Avoid nested quotations in auto-quoting fix ([#9168](https://github.com/astral-sh/ruff/pull/9168))
- Add base-class inheritance detection to flake8-django rules ([#9151](https://github.com/astral-sh/ruff/pull/9151))
- Avoid `asyncio-dangling-task` violations on shadowed bindings ([#9215](https://github.com/astral-sh/ruff/pull/9215))
### Documentation
- Fix blog post URL in changelog ([#9119](https://github.com/astral-sh/ruff/pull/9119))
- Add error suppression hint for multi-line strings ([#9205](https://github.com/astral-sh/ruff/pull/9205))
- Fix typo in SemanticModel.parent_expression docstring ([#9167](https://github.com/astral-sh/ruff/pull/9167))
- Document link between import sorting and formatter ([#9117](https://github.com/astral-sh/ruff/pull/9117))
## 0.1.8
This release includes opt-in support for formatting Python snippets within
docstrings via the `docstring-code-format` setting.
[Check out the blog post](https://astral.sh/blog/ruff-v0.1.8) for more details!
### Preview features
- Add `"preserve"` quote-style to mimic Black's skip-string-normalization ([#8822](https://github.com/astral-sh/ruff/pull/8822))
- Implement `prefer_splitting_right_hand_side_of_assignments` preview style ([#8943](https://github.com/astral-sh/ruff/pull/8943))
- \[`pycodestyle`\] Add fix for `unexpected-spaces-around-keyword-parameter-equals` ([#9072](https://github.com/astral-sh/ruff/pull/9072))
- \[`pycodestyle`\] Add fix for comment-related whitespace rules ([#9075](https://github.com/astral-sh/ruff/pull/9075))
- \[`pycodestyle`\] Allow `sys.path` modifications between imports ([#9047](https://github.com/astral-sh/ruff/pull/9047))
- \[`refurb`\] Implement `hashlib-digest-hex` (`FURB181`) ([#9077](https://github.com/astral-sh/ruff/pull/9077))
### Rule changes
- Allow `flake8-type-checking` rules to automatically quote runtime-evaluated references ([#6001](https://github.com/astral-sh/ruff/pull/6001))
- Allow transparent cell magics in Jupyter Notebooks ([#8911](https://github.com/astral-sh/ruff/pull/8911))
- \[`flake8-annotations`\] Avoid `ANN2xx` fixes for abstract methods with empty bodies ([#9034](https://github.com/astral-sh/ruff/pull/9034))
- \[`flake8-self`\] Ignore underscore references in type annotations ([#9036](https://github.com/astral-sh/ruff/pull/9036))
- \[`pep8-naming`\] Allow class names when `apps.get_model` is a non-string ([#9065](https://github.com/astral-sh/ruff/pull/9065))
- \[`pycodestyle`\] Allow `matplotlib.use` calls to intersperse imports ([#9094](https://github.com/astral-sh/ruff/pull/9094))
- \[`pyflakes`\] Support fixing unused assignments in tuples by renaming variables (`F841`) ([#9107](https://github.com/astral-sh/ruff/pull/9107))
- \[`pylint`\] Add fix for `subprocess-run-without-check` (`PLW1510`) ([#6708](https://github.com/astral-sh/ruff/pull/6708))
### Formatter
- Add `docstring-code-format` knob to enable docstring snippet formatting ([#8854](https://github.com/astral-sh/ruff/pull/8854))
- Use double quotes for all docstrings, including single-quoted docstrings ([#9020](https://github.com/astral-sh/ruff/pull/9020))
- Implement "dynamic" line width mode for docstring code formatting ([#9098](https://github.com/astral-sh/ruff/pull/9098))
- Support reformatting Markdown code blocks ([#9030](https://github.com/astral-sh/ruff/pull/9030))
- add support for formatting reStructuredText code snippets ([#9003](https://github.com/astral-sh/ruff/pull/9003))
- Avoid trailing comma for single-argument with positional separator ([#9076](https://github.com/astral-sh/ruff/pull/9076))
- Fix handling of trailing target comment ([#9051](https://github.com/astral-sh/ruff/pull/9051))
### CLI
- Hide unsafe fix suggestions when explicitly disabled ([#9095](https://github.com/astral-sh/ruff/pull/9095))
- Add SARIF support to `--output-format` ([#9078](https://github.com/astral-sh/ruff/pull/9078))
### Bug fixes
- Apply unnecessary index rule prior to enumerate rewrite ([#9012](https://github.com/astral-sh/ruff/pull/9012))
- \[`flake8-err-msg`\] Allow `EM` fixes even if `msg` variable is defined ([#9059](https://github.com/astral-sh/ruff/pull/9059))
- \[`flake8-pie`\] Prevent keyword arguments duplication ([#8450](https://github.com/astral-sh/ruff/pull/8450))
- \[`flake8-pie`\] Respect trailing comma in `unnecessary-dict-kwargs` (`PIE804`) ([#9015](https://github.com/astral-sh/ruff/pull/9015))
- \[`flake8-raise`\] Avoid removing parentheses on ctypes.WinError ([#9027](https://github.com/astral-sh/ruff/pull/9027))
- \[`isort`\] Avoid invalid combination of `force-sort-within-types` and `lines-between-types` ([#9041](https://github.com/astral-sh/ruff/pull/9041))
- \[`isort`\] Ensure that from-style imports are always ordered first in `__future__` ([#9039](https://github.com/astral-sh/ruff/pull/9039))
- \[`pycodestyle`\] Allow tab indentation before keyword ([#9099](https://github.com/astral-sh/ruff/pull/9099))
- \[`pylint`\] Ignore `@overrides` and `@overloads` for `too-many-positional` ([#9000](https://github.com/astral-sh/ruff/pull/9000))
- \[`pyupgrade`\] Enable `printf-string-formatting` fix with comments on right-hand side ([#9037](https://github.com/astral-sh/ruff/pull/9037))
- \[`refurb`\] Make `math-constant` (`FURB152`) rule more targeted ([#9054](https://github.com/astral-sh/ruff/pull/9054))
- \[`refurb`\] Support floating-point base in `redundant-log-base` (`FURB163`) ([#9100](https://github.com/astral-sh/ruff/pull/9100))
- \[`ruff`\] Detect `unused-asyncio-dangling-task` (`RUF006`) on unused assignments ([#9060](https://github.com/astral-sh/ruff/pull/9060))
## 0.1.7
### Preview features

View File

@@ -326,18 +326,16 @@ We use an experimental in-house tool for managing releases.
- Often labels will be missing from pull requests they will need to be manually organized into the proper section
- Changes should be edited to be user-facing descriptions, avoiding internal details
1. Highlight any breaking changes in `BREAKING_CHANGES.md`
1. Run `cargo check`. This should update the lock file with new versions.
1. Create a pull request with the changelog and version updates
1. Merge the PR
1. Run the [release workflow](https://github.com/astral-sh/ruff/actions/workflows/release.yaml) with:
- The new version number (without starting `v`)
- The commit hash of the merged release pull request on `main`
1. Run the release workflow with the version number (without starting `v`) as input. Make sure
main has your merged PR as last commit
1. The release workflow will do the following:
1. Build all the assets. If this fails (even though we tested in step 4), we haven't tagged or
uploaded anything, you can restart after pushing a fix.
1. Upload to PyPI.
1. Create and push the Git tag (as extracted from `pyproject.toml`). We create the Git tag only
after building the wheels and uploading to PyPI, since we can't delete or modify the tag ([#4468](https://github.com/astral-sh/ruff/issues/4468)).
after building the wheels and uploading to PyPI, since we can't delete or modify the tag ([#4468](https://github.com/charliermarsh/ruff/issues/4468)).
1. Attach artifacts to draft GitHub release
1. Trigger downstream repositories. This can fail non-catastrophically, as we can run any
downstream jobs manually if needed.
@@ -346,10 +344,7 @@ We use an experimental in-house tool for managing releases.
1. Copy the changelog for the release into the GitHub release
- See previous releases for formatting of section headers
1. Generate the contributor list with `rooster contributors` and add to the release notes
1. If needed, [update the schemastore](https://github.com/astral-sh/ruff/blob/main/scripts/update_schemastore.py).
1. One can determine if an update is needed when
`git diff old-version-tag new-version-tag -- ruff.schema.json` returns a non-empty diff.
1. Once run successfully, you should follow the link in the output to create a PR.
1. If needed, [update the schemastore](https://github.com/charliermarsh/ruff/blob/main/scripts/update_schemastore.py)
1. If needed, update the `ruff-lsp` and `ruff-vscode` repositories.
## Ecosystem CI
@@ -561,10 +556,10 @@ examples.
#### Linux
Install `perf` and build `ruff_benchmark` with the `profiling` profile and then run it with perf
Install `perf` and build `ruff_benchmark` with the `release-debug` profile and then run it with perf
```shell
cargo bench -p ruff_benchmark --no-run --profile=profiling && perf record --call-graph dwarf -F 9999 cargo bench -p ruff_benchmark --profile=profiling -- --profile-time=1
cargo bench -p ruff_benchmark --no-run --profile=release-debug && perf record --call-graph dwarf -F 9999 cargo bench -p ruff_benchmark --profile=release-debug -- --profile-time=1
```
You can also use the `ruff_dev` launcher to run `ruff check` multiple times on a repository to
@@ -572,8 +567,8 @@ gather enough samples for a good flamegraph (change the 999, the sample rate, an
of checks, to your liking)
```shell
cargo build --bin ruff_dev --profile=profiling
perf record -g -F 999 target/profiling/ruff_dev repeat --repeat 30 --exit-zero --no-cache path/to/cpython > /dev/null
cargo build --bin ruff_dev --profile=release-debug
perf record -g -F 999 target/release-debug/ruff_dev repeat --repeat 30 --exit-zero --no-cache path/to/cpython > /dev/null
```
Then convert the recorded profile
@@ -603,7 +598,7 @@ cargo install cargo-instruments
Then run the profiler with
```shell
cargo instruments -t time --bench linter --profile profiling -p ruff_benchmark -- --profile-time=1
cargo instruments -t time --bench linter --profile release-debug -p ruff_benchmark -- --profile-time=1
```
- `-t`: Specifies what to profile. Useful options are `time` to profile the wall time and `alloc`

153
Cargo.lock generated
View File

@@ -123,9 +123,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.76"
version = "1.0.75"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59d2a3357dde987206219e78ecfbbb6e8dad06cbb65292758d3270e6254f7355"
checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6"
[[package]]
name = "argfile"
@@ -234,9 +234,9 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]]
name = "cachedir"
version = "0.3.1"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4703f3937077db8fa35bee3c8789343c1aec2585f0146f09d658d4ccc0e8d873"
checksum = "e236bf5873ea57ec2877445297f4da008916bfae51567131acfc54a073d694f3"
dependencies = [
"tempfile",
]
@@ -434,10 +434,11 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
[[package]]
name = "colored"
version = "2.1.0"
version = "2.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8"
checksum = "2674ec482fbc38012cf31e6c42ba0177b431a0cb6f15fe40efa5aab1bda516f6"
dependencies = [
"is-terminal",
"lazy_static",
"windows-sys 0.48.0",
]
@@ -735,9 +736,9 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f"
[[package]]
name = "env_logger"
version = "0.10.1"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95b3f3e67048839cb0d0781f445682a35113da7121f7c949db0e2be96a4fbece"
checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0"
dependencies = [
"humantime",
"is-terminal",
@@ -808,7 +809,7 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
[[package]]
name = "flake8-to-ruff"
version = "0.1.9"
version = "0.1.7"
dependencies = [
"anyhow",
"clap",
@@ -817,7 +818,7 @@ dependencies = [
"itertools 0.11.0",
"log",
"once_cell",
"pep440_rs 0.4.0",
"pep440_rs",
"pretty_assertions",
"regex",
"ruff_linter",
@@ -827,7 +828,7 @@ dependencies = [
"serde_json",
"strum",
"strum_macros",
"toml",
"toml 0.7.8",
]
[[package]]
@@ -997,16 +998,17 @@ dependencies = [
[[package]]
name = "ignore"
version = "0.4.21"
version = "0.4.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "747ad1b4ae841a78e8aba0d63adbfbeaea26b517b63705d47856b73015d27060"
checksum = "dbe7873dab538a9a44ad79ede1faf5f30d49f9a5c883ddbab48bce81b64b7492"
dependencies = [
"crossbeam-deque",
"globset",
"lazy_static",
"log",
"memchr",
"regex-automata 0.4.3",
"regex",
"same-file",
"thread_local",
"walkdir",
"winapi-util",
]
@@ -1480,9 +1482,9 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3"
[[package]]
name = "once_cell"
version = "1.19.0"
version = "1.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
[[package]]
name = "oorandom"
@@ -1603,18 +1605,6 @@ dependencies = [
"unicode-width",
]
[[package]]
name = "pep440_rs"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0c29f9c43de378b4e4e0cd7dbcce0e5cfb80443de8c05620368b2948bc936a1"
dependencies = [
"once_cell",
"regex",
"serde",
"unicode-width",
]
[[package]]
name = "pep508_rs"
version = "0.2.1"
@@ -1622,7 +1612,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0713d7bb861ca2b7d4c50a38e1f31a4b63a2e2df35ef1e5855cc29e108453e2"
dependencies = [
"once_cell",
"pep440_rs 0.3.12",
"pep440_rs",
"regex",
"serde",
"thiserror",
@@ -1804,9 +1794,9 @@ dependencies = [
[[package]]
name = "proc-macro2"
version = "1.0.71"
version = "1.0.70"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75cb1540fadbd5b8fbccc4dddad2734eba435053f725621c070711a14bb5f4b8"
checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b"
dependencies = [
"unicode-ident",
]
@@ -1818,10 +1808,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46d4a5e69187f23a29f8aa0ea57491d104ba541bc55f76552c2a74962aa20e04"
dependencies = [
"indexmap",
"pep440_rs 0.3.12",
"pep440_rs",
"pep508_rs",
"serde",
"toml",
"toml 0.8.2",
]
[[package]]
@@ -2073,7 +2063,7 @@ dependencies = [
[[package]]
name = "ruff_cli"
version = "0.1.9"
version = "0.1.7"
dependencies = [
"annotate-snippets 0.9.2",
"anyhow",
@@ -2165,7 +2155,7 @@ dependencies = [
"strum",
"strum_macros",
"tempfile",
"toml",
"toml 0.7.8",
"tracing",
"tracing-indicatif",
"tracing-subscriber",
@@ -2209,7 +2199,7 @@ dependencies = [
[[package]]
name = "ruff_linter"
version = "0.1.9"
version = "0.1.7"
dependencies = [
"aho-corasick",
"annotate-snippets 0.9.2",
@@ -2232,7 +2222,7 @@ dependencies = [
"once_cell",
"path-absolutize",
"pathdiff",
"pep440_rs 0.4.0",
"pep440_rs",
"pretty_assertions",
"pyproject-toml",
"quick-junit",
@@ -2265,7 +2255,7 @@ dependencies = [
"tempfile",
"test-case",
"thiserror",
"toml",
"toml 0.7.8",
"typed-arena",
"unicode-width",
"unicode_names2",
@@ -2462,7 +2452,7 @@ dependencies = [
[[package]]
name = "ruff_shrinking"
version = "0.1.9"
version = "0.1.7"
dependencies = [
"anyhow",
"clap",
@@ -2538,7 +2528,7 @@ dependencies = [
"log",
"once_cell",
"path-absolutize",
"pep440_rs 0.4.0",
"pep440_rs",
"regex",
"ruff_cache",
"ruff_formatter",
@@ -2553,7 +2543,7 @@ dependencies = [
"shellexpand",
"strum",
"tempfile",
"toml",
"toml 0.7.8",
]
[[package]]
@@ -2741,9 +2731,9 @@ dependencies = [
[[package]]
name = "serde_spanned"
version = "0.6.5"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1"
checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186"
dependencies = [
"serde",
]
@@ -2968,9 +2958,9 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76"
[[package]]
name = "test-case"
version = "3.3.1"
version = "3.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb2550dd13afcd286853192af8601920d959b14c401fcece38071d53bf0768a8"
checksum = "c8f1e820b7f1d95a0cdbf97a5df9de10e1be731983ab943e56703ac1b8e9d425"
dependencies = [
"test-case-macros",
]
@@ -3003,18 +2993,18 @@ dependencies = [
[[package]]
name = "thiserror"
version = "1.0.51"
version = "1.0.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f11c217e1416d6f036b870f14e0413d480dbf28edbee1f877abaf0206af43bb7"
checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.51"
version = "1.0.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "01742297787513b79cf8e29d1056ede1313e2420b7b3b15d0a768b4921f549df"
checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8"
dependencies = [
"proc-macro2",
"quote",
@@ -3103,30 +3093,55 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "toml"
version = "0.8.8"
version = "0.7.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1a195ec8c9da26928f773888e0742ca3ca1040c6cd859c919c9f59c1954ab35"
checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257"
dependencies = [
"serde",
"serde_spanned",
"toml_datetime",
"toml_edit",
"toml_edit 0.19.15",
]
[[package]]
name = "toml"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "185d8ab0dfbb35cf1399a6344d8484209c088f75f8f68230da55d48d95d43e3d"
dependencies = [
"serde",
"serde_spanned",
"toml_datetime",
"toml_edit 0.20.2",
]
[[package]]
name = "toml_datetime"
version = "0.6.5"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1"
checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b"
dependencies = [
"serde",
]
[[package]]
name = "toml_edit"
version = "0.21.0"
version = "0.19.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03"
checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
dependencies = [
"indexmap",
"serde",
"serde_spanned",
"toml_datetime",
"winnow",
]
[[package]]
name = "toml_edit"
version = "0.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338"
dependencies = [
"indexmap",
"serde",
@@ -3170,9 +3185,9 @@ dependencies = [
[[package]]
name = "tracing-indicatif"
version = "0.3.6"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "069580424efe11d97c3fef4197fa98c004fa26672cc71ad8770d224e23b1951d"
checksum = "57e05fe4a1c906d94b275d8aeb8ff8b9deaca502aeb59ae8ab500a92b8032ac8"
dependencies = [
"indicatif",
"tracing",
@@ -3292,9 +3307,9 @@ checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c"
[[package]]
name = "unicode_names2"
version = "1.2.1"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac64ef2f016dc69dfa8283394a70b057066eb054d5fcb6b9eb17bd2ec5097211"
checksum = "5d5506ae2c3c1ccbdf468e52fc5ef536c2ccd981f01273a4cb81aa61021f3a5f"
dependencies = [
"phf",
"unicode_names2_generator",
@@ -3302,9 +3317,9 @@ dependencies = [
[[package]]
name = "unicode_names2_generator"
version = "1.2.1"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "013f6a731e80f3930de580e55ba41dfa846de4e0fdee4a701f97989cb1597d6a"
checksum = "b6dfc680313e95bc6637fa278cd7a22390c3c2cd7b8b2bd28755bc6c0fc811e7"
dependencies = [
"getopts",
"log",
@@ -3473,9 +3488,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-futures"
version = "0.4.39"
version = "0.4.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac36a15a220124ac510204aec1c3e5db8a22ab06fd6706d881dc6149f8ed9a12"
checksum = "9afec9963e3d0994cac82455b2b3502b81a7f40f9a0d32181f7528d9f4b43e02"
dependencies = [
"cfg-if",
"js-sys",
@@ -3514,9 +3529,9 @@ checksum = "7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f"
[[package]]
name = "wasm-bindgen-test"
version = "0.3.39"
version = "0.3.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2cf9242c0d27999b831eae4767b2a146feb0b27d332d553e605864acd2afd403"
checksum = "c6433b7c56db97397842c46b67e11873eda263170afeb3a2dc74a7cb370fee0d"
dependencies = [
"console_error_panic_hook",
"js-sys",
@@ -3528,9 +3543,9 @@ dependencies = [
[[package]]
name = "wasm-bindgen-test-macro"
version = "0.3.39"
version = "0.3.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "794645f5408c9a039fd09f4d113cdfb2e7eba5ff1956b07bcf701cf4b394fe89"
checksum = "493fcbab756bb764fa37e6bee8cec2dd709eb4273d06d0c282a5e74275ded735"
dependencies = [
"proc-macro2",
"quote",

View File

@@ -12,24 +12,24 @@ authors = ["Charlie Marsh <charlie.r.marsh@gmail.com>"]
license = "MIT"
[workspace.dependencies]
anyhow = { version = "1.0.76" }
anyhow = { version = "1.0.69" }
bitflags = { version = "2.4.1" }
chrono = { version = "0.4.31", default-features = false, features = ["clock"] }
clap = { version = "4.4.7", features = ["derive"] }
colored = { version = "2.1.0" }
colored = { version = "2.0.0" }
filetime = { version = "0.2.23" }
glob = { version = "0.3.1" }
globset = { version = "0.4.14" }
ignore = { version = "0.4.21" }
ignore = { version = "0.4.20" }
insta = { version = "1.34.0", feature = ["filters", "glob"] }
is-macro = { version = "0.3.1" }
itertools = { version = "0.11.0" }
libcst = { version = "1.1.0", default-features = false }
log = { version = "0.4.17" }
memchr = { version = "2.6.4" }
once_cell = { version = "1.19.0" }
once_cell = { version = "1.17.1" }
path-absolutize = { version = "3.1.1" }
proc-macro2 = { version = "1.0.71" }
proc-macro2 = { version = "1.0.70" }
quote = { version = "1.0.23" }
regex = { version = "1.10.2" }
rustc-hash = { version = "1.1.0" }
@@ -43,14 +43,14 @@ static_assertions = "1.1.0"
strum = { version = "0.25.0", features = ["strum_macros"] }
strum_macros = { version = "0.25.3" }
syn = { version = "2.0.40" }
test-case = { version = "3.3.1" }
thiserror = { version = "1.0.51" }
toml = { version = "0.8.8" }
test-case = { version = "3.2.1" }
thiserror = { version = "1.0.50" }
toml = { version = "0.7.8" }
tracing = { version = "0.1.40" }
tracing-indicatif = { version = "0.3.6" }
tracing-indicatif = { version = "0.3.4" }
tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
unicode-ident = { version = "1.0.12" }
unicode_names2 = { version = "1.2.1" }
unicode_names2 = { version = "1.2.0" }
unicode-width = { version = "0.1.11" }
uuid = { version = "1.6.1", features = ["v4", "fast-rng", "macro-diagnostics", "js"] }
wsl = { version = "0.1.0" }
@@ -88,20 +88,7 @@ rc_mutex = "warn"
rest_pat_in_fully_bound_structs = "warn"
[profile.release]
# Note that we set these explicitly, and these values
# were chosen based on a trade-off between compile times
# and runtime performance[1].
#
# [1]: https://github.com/astral-sh/ruff/pull/9031
lto = "thin"
codegen-units = 16
# Some crates don't change as much but benefit more from
# more expensive optimization passes, so we selectively
# decrease codegen-units in some cases.
[profile.release.package.ruff_python_parser]
codegen-units = 1
[profile.release.package.ruff_python_ast]
lto = "fat"
codegen-units = 1
[profile.dev.package.insta]
@@ -115,8 +102,8 @@ opt-level = 3
[profile.dev.package.ruff_python_parser]
opt-level = 1
# Use the `--profile profiling` flag to show symbols in release mode.
# e.g. `cargo build --profile profiling`
[profile.profiling]
# Use the `--profile release-debug` flag to show symbols in release mode.
# e.g. `cargo build --profile release-debug`
[profile.release-debug]
inherits = "release"
debug = 1

View File

@@ -150,7 +150,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com/) hook via [`ruff
```yaml
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.1.9
rev: v0.1.7
hooks:
# Run the linter.
- id: ruff
@@ -194,25 +194,20 @@ exclude = [
".git",
".git-rewrite",
".hg",
".ipynb_checkpoints",
".mypy_cache",
".nox",
".pants.d",
".pyenv",
".pytest_cache",
".pytype",
".ruff_cache",
".svn",
".tox",
".venv",
".vscode",
"__pypackages__",
"_build",
"buck-out",
"build",
"dist",
"node_modules",
"site-packages",
"venv",
]

View File

@@ -1,6 +1,6 @@
[package]
name = "flake8-to-ruff"
version = "0.1.9"
version = "0.1.7"
description = """
Convert Flake8 configuration files to Ruff configuration files.
"""
@@ -23,7 +23,7 @@ configparser = { version = "3.0.3" }
itertools = { workspace = true }
log = { workspace = true }
once_cell = { workspace = true }
pep440_rs = { version = "0.4.0", features = ["serde"] }
pep440_rs = { version = "0.3.12", features = ["serde"] }
regex = { workspace = true }
rustc-hash = { workspace = true }
serde = { workspace = true }

View File

@@ -2,7 +2,7 @@ use ruff_benchmark::criterion::{
criterion_group, criterion_main, BenchmarkGroup, BenchmarkId, Criterion, Throughput,
};
use ruff_benchmark::{TestCase, TestFile, TestFileDownloadError};
use ruff_linter::linter::{lint_only, ParseSource};
use ruff_linter::linter::lint_only;
use ruff_linter::rule_selector::PreviewOptions;
use ruff_linter::settings::rule_table::RuleTable;
use ruff_linter::settings::types::PreviewMode;
@@ -10,7 +10,6 @@ use ruff_linter::settings::{flags, LinterSettings};
use ruff_linter::source_kind::SourceKind;
use ruff_linter::{registry::Rule, RuleSelector};
use ruff_python_ast::PySourceType;
use ruff_python_parser::{lexer, parse_program_tokens, Mode};
#[cfg(target_os = "windows")]
#[global_allocator]
@@ -54,13 +53,7 @@ fn benchmark_linter(mut group: BenchmarkGroup, settings: &LinterSettings) {
BenchmarkId::from_parameter(case.name()),
&case,
|b, case| {
// Tokenize the source.
let tokens = lexer::lex(case.code(), Mode::Module).collect::<Vec<_>>();
// Parse the source.
let ast =
parse_program_tokens(tokens.clone(), case.code(), case.name(), false).unwrap();
let kind = SourceKind::Python(case.code().to_string());
b.iter(|| {
let path = case.path();
let result = lint_only(
@@ -68,12 +61,8 @@ fn benchmark_linter(mut group: BenchmarkGroup, settings: &LinterSettings) {
None,
settings,
flags::Noqa::Enabled,
&SourceKind::Python(case.code().to_string()),
&kind,
PySourceType::from(path.as_path()),
ParseSource::Precomputed {
tokens: &tokens,
ast: &ast,
},
);
// Assert that file contains no parse errors

View File

@@ -1,6 +1,6 @@
[package]
name = "ruff_cli"
version = "0.1.9"
version = "0.1.7"
publish = false
authors = { workspace = true }
edition = { workspace = true }
@@ -33,7 +33,7 @@ anyhow = { workspace = true }
argfile = { version = "0.1.6" }
bincode = { version = "1.3.3" }
bitflags = { workspace = true }
cachedir = { version = "0.3.1" }
cachedir = { version = "0.3.0" }
chrono = { workspace = true }
clap = { workspace = true, features = ["derive", "env"] }
clap_complete_command = { version = "0.5.1" }

View File

@@ -515,7 +515,7 @@ impl<'a> FormatResults<'a> {
if changed > 0 && unchanged > 0 {
writeln!(
f,
"{} file{} {}, {} file{} {}",
"{} file{} {}, {} file{} left unchanged",
changed,
if changed == 1 { "" } else { "s" },
match self.mode {
@@ -524,10 +524,6 @@ impl<'a> FormatResults<'a> {
},
unchanged,
if unchanged == 1 { "" } else { "s" },
match self.mode {
FormatMode::Write => "left unchanged",
FormatMode::Check | FormatMode::Diff => "already formatted",
},
)
} else if changed > 0 {
writeln!(
@@ -543,13 +539,9 @@ impl<'a> FormatResults<'a> {
} else if unchanged > 0 {
writeln!(
f,
"{} file{} {}",
"{} file{} left unchanged",
unchanged,
if unchanged == 1 { "" } else { "s" },
match self.mode {
FormatMode::Write => "left unchanged",
FormatMode::Check | FormatMode::Diff => "already formatted",
},
)
} else {
Ok(())

View File

@@ -12,7 +12,7 @@ use rustc_hash::FxHashMap;
use crate::cache::{Cache, FileCacheKey, LintCacheData};
use ruff_diagnostics::Diagnostic;
use ruff_linter::linter::{lint_fix, lint_only, FixTable, FixerResult, LinterResult, ParseSource};
use ruff_linter::linter::{lint_fix, lint_only, FixTable, FixerResult, LinterResult};
use ruff_linter::logging::DisplayParseError;
use ruff_linter::message::Message;
use ruff_linter::pyproject_toml::lint_pyproject_toml;
@@ -303,28 +303,12 @@ pub(crate) fn lint_path(
(result, fixed)
} else {
// If we fail to fix, lint the original source code.
let result = lint_only(
path,
package,
settings,
noqa,
&source_kind,
source_type,
ParseSource::None,
);
let result = lint_only(path, package, settings, noqa, &source_kind, source_type);
let fixed = FxHashMap::default();
(result, fixed)
}
} else {
let result = lint_only(
path,
package,
settings,
noqa,
&source_kind,
source_type,
ParseSource::None,
);
let result = lint_only(path, package, settings, noqa, &source_kind, source_type);
let fixed = FxHashMap::default();
(result, fixed)
};
@@ -460,7 +444,6 @@ pub(crate) fn lint_stdin(
noqa,
&source_kind,
source_type,
ParseSource::None,
);
let fixed = FxHashMap::default();
@@ -479,7 +462,6 @@ pub(crate) fn lint_stdin(
noqa,
&source_kind,
source_type,
ParseSource::None,
);
let fixed = FxHashMap::default();
(result, fixed)

View File

@@ -139,99 +139,6 @@ if condition:
Ok(())
}
#[test]
fn docstring_options() -> Result<()> {
let tempdir = TempDir::new()?;
let ruff_toml = tempdir.path().join("ruff.toml");
fs::write(
&ruff_toml,
r#"
[format]
docstring-code-format = true
docstring-code-line-length = 20
"#,
)?;
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.args(["format", "--config"])
.arg(&ruff_toml)
.arg("-")
.pass_stdin(r#"
def f(x):
'''
Something about `f`. And an example:
.. code-block:: python
foo, bar, quux = this_is_a_long_line(lion, hippo, lemur, bear)
Another example:
```py
foo, bar, quux = this_is_a_long_line(lion, hippo, lemur, bear)
```
And another:
>>> foo, bar, quux = this_is_a_long_line(lion, hippo, lemur, bear)
'''
pass
"#), @r###"
success: true
exit_code: 0
----- stdout -----
def f(x):
"""
Something about `f`. And an example:
.. code-block:: python
(
foo,
bar,
quux,
) = this_is_a_long_line(
lion,
hippo,
lemur,
bear,
)
Another example:
```py
(
foo,
bar,
quux,
) = this_is_a_long_line(
lion,
hippo,
lemur,
bear,
)
```
And another:
>>> (
... foo,
... bar,
... quux,
... ) = this_is_a_long_line(
... lion,
... hippo,
... lemur,
... bear,
... )
"""
pass
----- stderr -----
"###);
Ok(())
}
#[test]
fn mixed_line_endings() -> Result<()> {
let tempdir = TempDir::new()?;
@@ -255,7 +162,7 @@ fn mixed_line_endings() -> Result<()> {
----- stdout -----
----- stderr -----
2 files already formatted
2 files left unchanged
"###);
Ok(())
}
@@ -328,60 +235,6 @@ OTHER = "OTHER"
Ok(())
}
#[test]
fn messages() -> Result<()> {
let tempdir = TempDir::new()?;
fs::write(
tempdir.path().join("main.py"),
r#"
from test import say_hy
if __name__ == "__main__":
say_hy("dear Ruff contributor")
"#,
)?;
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.current_dir(tempdir.path())
.args(["format", "--no-cache", "--isolated", "--check"])
.arg("main.py"), @r###"
success: false
exit_code: 1
----- stdout -----
Would reformat: main.py
1 file would be reformatted
----- stderr -----
"###);
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.current_dir(tempdir.path())
.args(["format", "--no-cache", "--isolated"])
.arg("main.py"), @r###"
success: true
exit_code: 0
----- stdout -----
1 file reformatted
----- stderr -----
"###);
assert_cmd_snapshot!(Command::new(get_cargo_bin(BIN_NAME))
.current_dir(tempdir.path())
.args(["format", "--no-cache", "--isolated"])
.arg("main.py"), @r###"
success: true
exit_code: 0
----- stdout -----
1 file left unchanged
----- stderr -----
"###);
Ok(())
}
#[test]
fn force_exclude() -> Result<()> {
let tempdir = TempDir::new()?;
@@ -930,7 +783,7 @@ fn test_diff() {
----- stderr -----
2 files would be reformatted, 1 file already formatted
2 files would be reformatted, 1 file left unchanged
"###);
});
}

View File

@@ -7,7 +7,7 @@ use ruff_text_size::{Ranged, TextRange, TextSize};
/// A text edit to be applied to a source file. Inserts, deletes, or replaces
/// content at a given location.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Edit {
/// The start location of the edit.

View File

@@ -1,6 +1,6 @@
[package]
name = "ruff_linter"
version = "0.1.9"
version = "0.1.7"
publish = false
authors = { workspace = true }
edition = { workspace = true }
@@ -52,7 +52,7 @@ path-absolutize = { workspace = true, features = [
"use_unix_paths_on_wasm",
] }
pathdiff = { version = "0.2.1" }
pep440_rs = { version = "0.4.0", features = ["serde"] }
pep440_rs = { version = "0.3.12", features = ["serde"] }
pyproject-toml = { version = "0.8.1" }
quick-junit = { version = "0.3.5" }
regex = { workspace = true }

View File

@@ -182,50 +182,3 @@ class Foo(abc.ABC):
return 1
else:
return 1.5
def func(x: int):
try:
pass
except:
return 2
def func(x: int):
try:
pass
except:
return 2
else:
return 3
def func(x: int):
if not x:
raise ValueError
else:
raise TypeError
def func(x: int):
if not x:
raise ValueError
else:
return 1
from typing import overload
@overload
def overloaded(i: int) -> "int":
...
@overload
def overloaded(i: "str") -> "str":
...
def overloaded(i):
return i

View File

@@ -127,21 +127,3 @@ class MultipleConsecutiveFields(models.Model):
pass
middle_name = models.CharField(max_length=32)
class BaseModel(models.Model):
pass
class StrBeforeFieldInheritedModel(BaseModel):
"""Model with `__str__` before fields."""
class Meta:
verbose_name = "test"
verbose_name_plural = "tests"
def __str__(self):
return "foobar"
first_name = models.CharField(max_length=32)

View File

@@ -1,13 +1,8 @@
import typing
import typing_extensions
from typing import TypeVar
from typing_extensions import ParamSpec, TypeVarTuple
_T = typing.TypeVar("_T")
_Ts = typing_extensions.TypeVarTuple("_Ts")
_P = ParamSpec("_P")
_P2 = typing.ParamSpec("_P2")
_Ts2 = TypeVarTuple("_Ts2")
_P = TypeVar("_P")
# OK
_UsedTypeVar = TypeVar("_UsedTypeVar")

View File

@@ -1,13 +1,8 @@
import typing
import typing_extensions
from typing import TypeVar
from typing_extensions import ParamSpec, TypeVarTuple
_T = typing.TypeVar("_T")
_Ts = typing_extensions.TypeVarTuple("_Ts")
_P = ParamSpec("_P")
_P2 = typing.ParamSpec("_P2")
_Ts2 = TypeVarTuple("_Ts2")
_P = TypeVar("_P")
# OK
_UsedTypeVar = TypeVar("_UsedTypeVar")

View File

@@ -37,28 +37,3 @@ def func():
# PYI055
x: Union[type[requests_mock.Mocker], type[httpretty], type[str]] = requests_mock.Mocker
def convert_union(union: UnionType) -> _T | None:
converters: tuple[
type[_T] | type[Converter[_T]] | Converter[_T] | Callable[[str], _T], ... # PYI055
] = union.__args__
...
def convert_union(union: UnionType) -> _T | None:
converters: tuple[
Union[type[_T] | type[Converter[_T]] | Converter[_T] | Callable[[str], _T]], ... # PYI055
] = union.__args__
...
def convert_union(union: UnionType) -> _T | None:
converters: tuple[
Union[type[_T] | type[Converter[_T]]] | Converter[_T] | Callable[[str], _T], ... # PYI055
] = union.__args__
...
def convert_union(union: UnionType) -> _T | None:
converters: tuple[
Union[type[_T] | type[Converter[_T]] | str] | Converter[_T] | Callable[[str], _T], ... # PYI055
] = union.__args__
...

View File

@@ -1,5 +1,6 @@
# Errors
"yoda" == compare # SIM300
"yoda" == compare # SIM300
42 == age # SIM300
("a", "b") == compare # SIM300
"yoda" <= compare # SIM300
@@ -12,17 +13,10 @@ YODA > age # SIM300
YODA >= age # SIM300
JediOrder.YODA == age # SIM300
0 < (number - 100) # SIM300
SomeClass().settings.SOME_CONSTANT_VALUE > (60 * 60) # SIM300
B<A[0][0]or B
B or(B)<A[0][0]
# Errors in preview
['upper'] == UPPER_LIST
{} == DummyHandler.CONFIG
# Errors in stable
UPPER_LIST == ['upper']
DummyHandler.CONFIG == {}
# OK
compare == "yoda"
age == 42
@@ -37,6 +31,3 @@ age <= YODA
YODA == YODA
age == JediOrder.YODA
(number - 100) > 0
SECONDS_IN_DAY == 60 * 60 * 24 # Error in 0.1.8
SomeClass().settings.SOME_CONSTANT_VALUE > (60 * 60) # Error in 0.1.8
{"non-empty-dict": "is-ok"} == DummyHandler.CONFIG

View File

@@ -1,18 +0,0 @@
from __future__ import annotations
from typing import TypeVar
x: "int" | str # TCH006
x: ("int" | str) | "bool" # TCH006
def func():
x: "int" | str # OK
z: list[str, str | "int"] = [] # TCH006
type A = Value["int" | str] # OK
OldS = TypeVar('OldS', int | 'str', str) # TCH006

View File

@@ -1,16 +0,0 @@
from typing import TypeVar
x: "int" | str # TCH006
x: ("int" | str) | "bool" # TCH006
def func():
x: "int" | str # OK
z: list[str, str | "int"] = [] # TCH006
type A = Value["int" | str] # OK
OldS = TypeVar('OldS', int | 'str', str) # TCH006

View File

@@ -1,7 +0,0 @@
"""Add `TYPE_CHECKING` to an existing `typing` import. Another member is moved."""
from __future__ import annotations
from typing import Final
Const: Final[dict] = {}

View File

@@ -1,7 +0,0 @@
"""Using `TYPE_CHECKING` from an existing `typing` import. Another member is moved."""
from __future__ import annotations
from typing import Final, TYPE_CHECKING
Const: Final[dict] = {}

View File

@@ -1,7 +0,0 @@
"""Using `TYPE_CHECKING` from an existing `typing` import. Another member is moved."""
from __future__ import annotations
from typing import Final, Mapping
Const: Final[dict] = {}

View File

@@ -65,28 +65,3 @@ def f():
def func(value: DataFrame):
...
def f():
from pandas import DataFrame, Series
def baz() -> DataFrame | Series:
...
def f():
from pandas import DataFrame, Series
def baz() -> (
DataFrame |
Series
):
...
class C:
x: DataFrame[
int
] = 1
def func() -> DataFrame[[DataFrame[_P, _R]], DataFrame[_P, _R]]:
...

View File

@@ -1,9 +0,0 @@
import a
"""Some other docstring."""
import b
"""Some other docstring."""
import c

View File

@@ -713,12 +713,5 @@ def retain_extra_whitespace_not_overindented():
This is not overindented
This is overindented, but since one line is not overindented this should not raise
And so is this, but it we should preserve the extra space on this line relative
"""
def inconsistent_indent_byte_size():
"""There's a non-breaking space (2-bytes) after 3 spaces (https://github.com/astral-sh/ruff/issues/9080).
    Returns:
And so is this, but it we should preserve the extra space on this line relative
"""

View File

@@ -1,4 +0,0 @@
import re
from typing import Annotated
type X = Annotated[int, lambda: re.compile("x")]

View File

@@ -1,5 +0,0 @@
"""Docstring"""
"""Non-docstring"""
from __future__ import absolute_import

View File

@@ -1,8 +0,0 @@
"""Test for accessing class members within a generator."""
class Class:
items = []
if len(replacements := {item[1] for item in items}) > 1:
pass

View File

@@ -1,36 +0,0 @@
def func() -> None: # OK
# 15 is max default
first = 1
second = 2
third = 3
fourth = 4
fifth = 5
sixth = 6
seventh = 7
eighth = 8
ninth = 9
tenth = 10
eleventh = 11
twelveth = 12
thirteenth = 13
fourteenth = 14
fifteenth = 15
def func() -> None: # PLR0914
first = 1
second = 2
third = 3
fourth = 4
fifth = 5
sixth = 6
seventh = 7
eighth = 8
ninth = 9
tenth = 10
eleventh = 11
twelfth = 12
thirteenth = 13
fourteenth = 14
fifteenth = 15
sixteenth = 16

View File

@@ -1,28 +1,30 @@
u"Hello"
# These should change
x = u"Hello"
x = u"Hello" # UP025
u'world'
u'world' # UP025
print(u"Hello")
print(u"Hello") # UP025
print(u'world') # UP025
print(u'world')
import foo
foo(u"Hello", U"world", a=u"Hello", b=u"world") # UP025
foo(u"Hello", U"world", a=u"Hello", b=u"world")
# Retain quotes when fixing.
x = u'hello' # UP025
x = u"""hello""" # UP025
x = u'''hello''' # UP025
x = u'Hello "World"' # UP025
# These should stay quoted they way they are
u = "Hello" # OK
u = u # OK
x = u'hello'
x = u"""hello"""
x = u'''hello'''
x = u'Hello "World"'
# These should not change
u = "Hello"
u = u
def hello():
return"Hello" # OK
return"Hello"
f"foo"u"bar" # OK
f"foo" u"bar" # OK
f"foo"u"bar"
f"foo" u"bar"

View File

@@ -243,12 +243,3 @@ raise ValueError(
).format(a, b)
("{}" "{{{}}}").format(a, b)
# The dictionary should be parenthesized.
"{}".format({0: 1}[0])
# The dictionary should be parenthesized.
"{}".format({0: 1}.bar)
# The dictionary should be parenthesized.
"{}".format({0: 1}())

View File

@@ -1,61 +0,0 @@
# Errors.
op_bitnot = lambda x: ~x
op_not = lambda x: not x
op_pos = lambda x: +x
op_neg = lambda x: -x
op_add = lambda x, y: x + y
op_sub = lambda x, y: x - y
op_mult = lambda x, y: x * y
op_matmutl = lambda x, y: x @ y
op_truediv = lambda x, y: x / y
op_mod = lambda x, y: x % y
op_pow = lambda x, y: x ** y
op_lshift = lambda x, y: x << y
op_rshift = lambda x, y: x >> y
op_bitor = lambda x, y: x | y
op_xor = lambda x, y: x ^ y
op_bitand = lambda x, y: x & y
op_floordiv = lambda x, y: x // y
op_eq = lambda x, y: x == y
op_ne = lambda x, y: x != y
op_lt = lambda x, y: x < y
op_lte = lambda x, y: x <= y
op_gt = lambda x, y: x > y
op_gte = lambda x, y: x >= y
op_is = lambda x, y: x is y
op_isnot = lambda x, y: x is not y
op_in = lambda x, y: y in x
def op_not2(x):
return not x
def op_add2(x, y):
return x + y
class Adder:
def add(x, y):
return x + y
# OK.
op_add3 = lambda x, y = 1: x + y
op_neg2 = lambda x, y: y - x
op_notin = lambda x, y: y not in x
op_and = lambda x, y: y and x
op_or = lambda x, y: y or x
op_in = lambda x, y: x in y
def op_neg3(x, y):
return y - x
def op_add4(x, y = 1):
return x + y
def op_add5(x, y):
print("op_add5")
return x + y

View File

@@ -122,43 +122,3 @@ async def f():
# OK
async def f():
task[i] = asyncio.create_task(coordinator.ws_connect())
# OK
async def f(x: int):
if x > 0:
task = asyncio.create_task(make_request())
else:
task = asyncio.create_task(make_request())
await task
# OK
async def f(x: bool):
if x:
t = asyncio.create_task(asyncio.sleep(1))
else:
t = None
try:
await asyncio.sleep(1)
finally:
if t:
await t
# Error
async def f(x: bool):
if x:
t = asyncio.create_task(asyncio.sleep(1))
else:
t = None
# OK
async def f(x: bool):
global T
if x:
T = asyncio.create_task(asyncio.sleep(1))
else:
T = None

View File

@@ -59,11 +59,3 @@ class F(BaseSettings):
without_annotation = []
class_variable: ClassVar[list[int]] = []
final_variable: Final[list[int]] = []
class G(F):
mutable_default: list[int] = []
immutable_annotation: Sequence[int] = []
without_annotation = []
class_variable: ClassVar[list[int]] = []
final_variable: Final[list[int]] = []

View File

@@ -1,8 +0,0 @@
from typing import Never, NoReturn, Union
Union[Never, int]
Union[NoReturn, int]
Never | int
NoReturn | int
Union[Union[Never, int], Union[NoReturn, int]]
Union[NoReturn, int, float]

View File

@@ -3,11 +3,12 @@ use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::rules::{flake8_import_conventions, flake8_pyi, pyflakes, pylint};
use crate::rules::{flake8_import_conventions, flake8_pyi, pyflakes, pylint, ruff};
/// Run lint rules over the [`Binding`]s.
pub(crate) fn bindings(checker: &mut Checker) {
if !checker.any_enabled(&[
Rule::AsyncioDanglingTask,
Rule::InvalidAllFormat,
Rule::InvalidAllObject,
Rule::NonAsciiName,
@@ -71,5 +72,12 @@ pub(crate) fn bindings(checker: &mut Checker) {
checker.diagnostics.push(diagnostic);
}
}
if checker.enabled(Rule::AsyncioDanglingTask) {
if let Some(diagnostic) =
ruff::rules::asyncio_dangling_binding(binding, &checker.semantic)
{
checker.diagnostics.push(diagnostic);
}
}
}
}

View File

@@ -2,7 +2,7 @@ use ruff_python_ast::Expr;
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::rules::{flake8_pie, pylint, refurb};
use crate::rules::{flake8_pie, pylint};
/// Run lint rules over all deferred lambdas in the [`SemanticModel`].
pub(crate) fn deferred_lambdas(checker: &mut Checker) {
@@ -21,9 +21,6 @@ pub(crate) fn deferred_lambdas(checker: &mut Checker) {
if checker.enabled(Rule::ReimplementedContainerBuiltin) {
flake8_pie::rules::reimplemented_container_builtin(checker, lambda);
}
if checker.enabled(Rule::ReimplementedOperator) {
refurb::rules::reimplemented_operator(checker, &lambda.into());
}
}
}
}

View File

@@ -5,21 +5,16 @@ use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::rules::{
flake8_pyi, flake8_type_checking, flake8_unused_arguments, pyflakes, pylint, ruff,
};
use crate::rules::{flake8_pyi, flake8_type_checking, flake8_unused_arguments, pyflakes, pylint};
/// Run lint rules over all deferred scopes in the [`SemanticModel`].
pub(crate) fn deferred_scopes(checker: &mut Checker) {
if !checker.any_enabled(&[
Rule::AsyncioDanglingTask,
Rule::GlobalVariableNotAssigned,
Rule::ImportShadowedByLoopVar,
Rule::NoSelfUse,
Rule::RedefinedArgumentFromLocal,
Rule::RedefinedWhileUnused,
Rule::RuntimeImportInTypeCheckingBlock,
Rule::TooManyLocals,
Rule::TypingOnlyFirstPartyImport,
Rule::TypingOnlyStandardLibraryImport,
Rule::TypingOnlyThirdPartyImport,
@@ -36,6 +31,7 @@ pub(crate) fn deferred_scopes(checker: &mut Checker) {
Rule::UnusedPrivateTypedDict,
Rule::UnusedStaticMethodArgument,
Rule::UnusedVariable,
Rule::NoSelfUse,
]) {
return;
}
@@ -273,10 +269,6 @@ pub(crate) fn deferred_scopes(checker: &mut Checker) {
flake8_pyi::rules::unused_private_typed_dict(checker, scope, &mut diagnostics);
}
if checker.enabled(Rule::AsyncioDanglingTask) {
ruff::rules::asyncio_dangling_binding(scope, &checker.semantic, &mut diagnostics);
}
if matches!(scope.kind, ScopeKind::Function(_) | ScopeKind::Lambda(_)) {
if checker.enabled(Rule::UnusedVariable) {
pyflakes::rules::unused_variable(checker, scope, &mut diagnostics);
@@ -344,10 +336,6 @@ pub(crate) fn deferred_scopes(checker: &mut Checker) {
if checker.enabled(Rule::NoSelfUse) {
pylint::rules::no_self_use(checker, scope_id, scope, &mut diagnostics);
}
if checker.enabled(Rule::TooManyLocals) {
pylint::rules::too_many_locals(checker, scope, &mut diagnostics);
}
}
}
checker.diagnostics.extend(diagnostics);

View File

@@ -15,9 +15,8 @@ use crate::rules::{
flake8_comprehensions, flake8_datetimez, flake8_debugger, flake8_django,
flake8_future_annotations, flake8_gettext, flake8_implicit_str_concat, flake8_logging,
flake8_logging_format, flake8_pie, flake8_print, flake8_pyi, flake8_pytest_style, flake8_self,
flake8_simplify, flake8_tidy_imports, flake8_trio, flake8_type_checking, flake8_use_pathlib,
flynt, numpy, pandas_vet, pep8_naming, pycodestyle, pyflakes, pygrep_hooks, pylint, pyupgrade,
refurb, ruff,
flake8_simplify, flake8_tidy_imports, flake8_trio, flake8_use_pathlib, flynt, numpy,
pandas_vet, pep8_naming, pycodestyle, pyflakes, pygrep_hooks, pylint, pyupgrade, refurb, ruff,
};
use crate::settings::types::PythonVersion;
@@ -81,7 +80,6 @@ pub(crate) fn expression(expr: &Expr, checker: &mut Checker) {
Rule::DuplicateUnionMember,
Rule::RedundantLiteralUnion,
Rule::UnnecessaryTypeUnion,
Rule::NeverUnion,
]) {
// Avoid duplicate checks if the parent is a union, since these rules already
// traverse nested unions.
@@ -101,10 +99,6 @@ pub(crate) fn expression(expr: &Expr, checker: &mut Checker) {
}
}
if checker.enabled(Rule::NeverUnion) {
ruff::rules::never_union(checker, expr);
}
if checker.any_enabled(&[
Rule::SysVersionSlice3,
Rule::SysVersion2,
@@ -1159,10 +1153,6 @@ pub(crate) fn expression(expr: &Expr, checker: &mut Checker) {
}
}
if checker.enabled(Rule::NeverUnion) {
ruff::rules::never_union(checker, expr);
}
// Avoid duplicate checks if the parent is a union, since these rules already
// traverse nested unions.
if !checker.semantic.in_nested_union() {
@@ -1180,9 +1170,6 @@ pub(crate) fn expression(expr: &Expr, checker: &mut Checker) {
if checker.enabled(Rule::UnnecessaryTypeUnion) {
flake8_pyi::rules::unnecessary_type_union(checker, expr);
}
if checker.enabled(Rule::RuntimeStringUnion) {
flake8_type_checking::rules::runtime_string_union(checker, expr);
}
}
}
Expr::UnaryOp(

View File

@@ -368,9 +368,6 @@ pub(crate) fn statement(stmt: &Stmt, checker: &mut Checker) {
.diagnostics
.extend(ruff::rules::unreachable::in_function(name, body));
}
if checker.enabled(Rule::ReimplementedOperator) {
refurb::rules::reimplemented_operator(checker, &function_def.into());
}
}
Stmt::Return(_) => {
if checker.enabled(Rule::ReturnOutsideFunction) {
@@ -400,13 +397,27 @@ pub(crate) fn statement(stmt: &Stmt, checker: &mut Checker) {
flake8_django::rules::nullable_model_string_field(checker, body);
}
if checker.enabled(Rule::DjangoExcludeWithModelForm) {
flake8_django::rules::exclude_with_model_form(checker, class_def);
if let Some(diagnostic) = flake8_django::rules::exclude_with_model_form(
checker,
arguments.as_deref(),
body,
) {
checker.diagnostics.push(diagnostic);
}
}
if checker.enabled(Rule::DjangoAllWithModelForm) {
flake8_django::rules::all_with_model_form(checker, class_def);
if let Some(diagnostic) =
flake8_django::rules::all_with_model_form(checker, arguments.as_deref(), body)
{
checker.diagnostics.push(diagnostic);
}
}
if checker.enabled(Rule::DjangoUnorderedBodyContentInModel) {
flake8_django::rules::unordered_body_content_in_model(checker, class_def);
flake8_django::rules::unordered_body_content_in_model(
checker,
arguments.as_deref(),
body,
);
}
if !checker.source_type.is_stub() {
if checker.enabled(Rule::DjangoModelWithoutDunderStr) {

View File

@@ -287,18 +287,7 @@ where
// Track whether we've seen docstrings, non-imports, etc.
match stmt {
Stmt::Expr(ast::StmtExpr { value, .. })
if !self
.semantic
.flags
.intersects(SemanticModelFlags::MODULE_DOCSTRING)
&& value.is_string_literal_expr() =>
{
self.semantic.flags |= SemanticModelFlags::MODULE_DOCSTRING;
}
Stmt::ImportFrom(ast::StmtImportFrom { module, names, .. }) => {
self.semantic.flags |= SemanticModelFlags::MODULE_DOCSTRING;
// Allow __future__ imports until we see a non-__future__ import.
if let Some("__future__") = module.as_deref() {
if names
@@ -312,11 +301,9 @@ where
}
}
Stmt::Import(_) => {
self.semantic.flags |= SemanticModelFlags::MODULE_DOCSTRING;
self.semantic.flags |= SemanticModelFlags::FUTURES_BOUNDARY;
}
_ => {
self.semantic.flags |= SemanticModelFlags::MODULE_DOCSTRING;
self.semantic.flags |= SemanticModelFlags::FUTURES_BOUNDARY;
if !(self.semantic.seen_import_boundary()
|| helpers::is_assignment_to_a_dunder(stmt)
@@ -1448,8 +1435,11 @@ where
impl<'a> Checker<'a> {
/// Visit a [`Module`]. Returns `true` if the module contains a module-level docstring.
fn visit_module(&mut self, python_ast: &'a Suite) {
fn visit_module(&mut self, python_ast: &'a Suite) -> bool {
analyze::module(python_ast, self);
let docstring = docstrings::extraction::docstring_from(python_ast);
docstring.is_some()
}
/// Visit a list of [`Comprehension`] nodes, assumed to be the comprehensions that compose a
@@ -1755,13 +1745,10 @@ impl<'a> Checker<'a> {
return;
}
// If the expression is the left-hand side of a walrus operator, then it's a named
// expression assignment.
if self
.semantic
.current_expressions()
.filter_map(Expr::as_named_expr_expr)
.any(|parent| parent.target.as_ref() == expr)
.any(Expr::is_named_expr_expr)
{
self.add_binding(id, expr.range(), BindingKind::NamedExprAssignment, flags);
return;
@@ -2016,19 +2003,23 @@ pub(crate) fn check_ast(
);
checker.bind_builtins();
// Check for module docstring.
let python_ast = if checker.visit_module(python_ast) {
&python_ast[1..]
} else {
python_ast
};
// Iterate over the AST.
checker.visit_module(python_ast);
checker.visit_body(python_ast);
// Visit any deferred syntax nodes. Take care to visit in order, such that we avoid adding
// new deferred nodes after visiting nodes of that kind. For example, visiting a deferred
// function can add a deferred lambda, but the opposite is not true.
// Visit any deferred syntax nodes.
checker.visit_deferred_functions();
checker.visit_deferred_type_param_definitions();
checker.visit_deferred_lambdas();
checker.visit_deferred_future_type_definitions();
checker.visit_deferred_type_param_definitions();
let allocator = typed_arena::Arena::new();
checker.visit_deferred_string_type_definitions(&allocator);
checker.visit_deferred_lambdas();
checker.visit_exports();
// Check docstrings, bindings, and unresolved references.

View File

@@ -1,4 +1,4 @@
use ruff_diagnostics::Diagnostic;
use ruff_diagnostics::{Diagnostic, DiagnosticKind};
use ruff_python_codegen::Stylist;
use ruff_python_parser::lexer::LexResult;
use ruff_python_parser::TokenKind;
@@ -97,7 +97,7 @@ pub(crate) fn check_logical_lines(
indent_size,
) {
if settings.rules.enabled(kind.rule()) {
context.push_diagnostic(Diagnostic::new(kind, range));
context.push(kind, range);
}
}
@@ -123,6 +123,18 @@ impl<'a> LogicalLinesContext<'a> {
}
}
pub(crate) fn push<K: Into<DiagnosticKind>>(&mut self, kind: K, range: TextRange) {
let kind = kind.into();
if self.settings.rules.enabled(kind.rule()) {
self.diagnostics.push(Diagnostic {
kind,
range,
fix: None,
parent: None,
});
}
}
pub(crate) fn push_diagnostic(&mut self, diagnostic: Diagnostic) {
if self.settings.rules.enabled(diagnostic.kind.rule()) {
self.diagnostics.push(diagnostic);

View File

@@ -252,7 +252,6 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Pylint, "R0911") => (RuleGroup::Stable, rules::pylint::rules::TooManyReturnStatements),
(Pylint, "R0912") => (RuleGroup::Stable, rules::pylint::rules::TooManyBranches),
(Pylint, "R0913") => (RuleGroup::Stable, rules::pylint::rules::TooManyArguments),
(Pylint, "R0914") => (RuleGroup::Preview, rules::pylint::rules::TooManyLocals),
(Pylint, "R0915") => (RuleGroup::Stable, rules::pylint::rules::TooManyStatements),
(Pylint, "R0916") => (RuleGroup::Preview, rules::pylint::rules::TooManyBooleanExpressions),
(Pylint, "R0917") => (RuleGroup::Preview, rules::pylint::rules::TooManyPositional),
@@ -808,7 +807,6 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Flake8TypeChecking, "003") => (RuleGroup::Stable, rules::flake8_type_checking::rules::TypingOnlyStandardLibraryImport),
(Flake8TypeChecking, "004") => (RuleGroup::Stable, rules::flake8_type_checking::rules::RuntimeImportInTypeCheckingBlock),
(Flake8TypeChecking, "005") => (RuleGroup::Stable, rules::flake8_type_checking::rules::EmptyTypeCheckingBlock),
(Flake8TypeChecking, "006") => (RuleGroup::Preview, rules::flake8_type_checking::rules::RuntimeStringUnion),
// tryceratops
(Tryceratops, "002") => (RuleGroup::Stable, rules::tryceratops::rules::RaiseVanillaClass),
@@ -901,7 +899,6 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Ruff, "017") => (RuleGroup::Nursery, rules::ruff::rules::QuadraticListSummation),
(Ruff, "018") => (RuleGroup::Preview, rules::ruff::rules::AssignmentInAssert),
(Ruff, "019") => (RuleGroup::Preview, rules::ruff::rules::UnnecessaryKeyCheck),
(Ruff, "020") => (RuleGroup::Preview, rules::ruff::rules::NeverUnion),
(Ruff, "100") => (RuleGroup::Stable, rules::ruff::rules::UnusedNOQA),
(Ruff, "200") => (RuleGroup::Stable, rules::ruff::rules::InvalidPyprojectToml),
@@ -954,7 +951,6 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Refurb, "105") => (RuleGroup::Preview, rules::refurb::rules::PrintEmptyString),
#[allow(deprecated)]
(Refurb, "113") => (RuleGroup::Nursery, rules::refurb::rules::RepeatedAppend),
(Refurb, "118") => (RuleGroup::Preview, rules::refurb::rules::ReimplementedOperator),
#[allow(deprecated)]
(Refurb, "131") => (RuleGroup::Nursery, rules::refurb::rules::DeleteFullSlice),
#[allow(deprecated)]

View File

@@ -13,7 +13,7 @@ use ruff_text_size::{Ranged, TextSize};
use ruff_diagnostics::Edit;
use ruff_python_ast::imports::{AnyImport, Import, ImportFrom};
use ruff_python_codegen::Stylist;
use ruff_python_semantic::{ImportedName, SemanticModel};
use ruff_python_semantic::SemanticModel;
use ruff_python_trivia::textwrap::indent;
use ruff_source_file::Locator;
@@ -132,48 +132,7 @@ impl<'a> Importer<'a> {
)?;
// Import the `TYPE_CHECKING` symbol from the typing module.
let (type_checking_edit, type_checking) =
if let Some(type_checking) = Self::find_type_checking(at, semantic)? {
// Special-case: if the `TYPE_CHECKING` symbol is imported as part of the same
// statement that we're modifying, avoid adding a no-op edit. For example, here,
// the `TYPE_CHECKING` no-op edit would overlap with the edit to remove `Final`
// from the import:
// ```python
// from __future__ import annotations
//
// from typing import Final, TYPE_CHECKING
//
// Const: Final[dict] = {}
// ```
let edit = if type_checking.statement(semantic) == import.statement {
None
} else {
Some(Edit::range_replacement(
self.locator.slice(type_checking.range()).to_string(),
type_checking.range(),
))
};
(edit, type_checking.into_name())
} else {
// Special-case: if the `TYPE_CHECKING` symbol would be added to the same import
// we're modifying, import it as a separate import statement. For example, here,
// we're concurrently removing `Final` and adding `TYPE_CHECKING`, so it's easier to
// use a separate import statement:
// ```python
// from __future__ import annotations
//
// from typing import Final
//
// Const: Final[dict] = {}
// ```
let (edit, name) = self.import_symbol(
&ImportRequest::import_from("typing", "TYPE_CHECKING"),
at,
Some(import.statement),
semantic,
)?;
(Some(edit), name)
};
let (type_checking_edit, type_checking) = self.get_or_import_type_checking(at, semantic)?;
// Add the import to a `TYPE_CHECKING` block.
let add_import_edit = if let Some(block) = self.preceding_type_checking_block(at) {
@@ -198,21 +157,28 @@ impl<'a> Importer<'a> {
})
}
/// Find a reference to `typing.TYPE_CHECKING`.
fn find_type_checking(
/// Generate an [`Edit`] to reference `typing.TYPE_CHECKING`. Returns the [`Edit`] necessary to
/// make the symbol available in the current scope along with the bound name of the symbol.
fn get_or_import_type_checking(
&self,
at: TextSize,
semantic: &SemanticModel,
) -> Result<Option<ImportedName>, ResolutionError> {
) -> Result<(Edit, String), ResolutionError> {
for module in semantic.typing_modules() {
if let Some(imported_name) = Self::find_symbol(
if let Some((edit, name)) = self.get_symbol(
&ImportRequest::import_from(module, "TYPE_CHECKING"),
at,
semantic,
)? {
return Ok(Some(imported_name));
return Ok((edit, name));
}
}
Ok(None)
self.import_symbol(
&ImportRequest::import_from("typing", "TYPE_CHECKING"),
at,
semantic,
)
}
/// Generate an [`Edit`] to reference the given symbol. Returns the [`Edit`] necessary to make
@@ -226,15 +192,16 @@ impl<'a> Importer<'a> {
semantic: &SemanticModel,
) -> Result<(Edit, String), ResolutionError> {
self.get_symbol(symbol, at, semantic)?
.map_or_else(|| self.import_symbol(symbol, at, None, semantic), Ok)
.map_or_else(|| self.import_symbol(symbol, at, semantic), Ok)
}
/// Return the [`ImportedName`] to for existing symbol, if it's present in the given [`SemanticModel`].
fn find_symbol(
/// Return an [`Edit`] to reference an existing symbol, if it's present in the given [`SemanticModel`].
fn get_symbol(
&self,
symbol: &ImportRequest,
at: TextSize,
semantic: &SemanticModel,
) -> Result<Option<ImportedName>, ResolutionError> {
) -> Result<Option<(Edit, String)>, ResolutionError> {
// If the symbol is already available in the current scope, use it.
let Some(imported_name) =
semantic.resolve_qualified_import_name(symbol.module, symbol.member)
@@ -259,21 +226,6 @@ impl<'a> Importer<'a> {
return Err(ResolutionError::IncompatibleContext);
}
Ok(Some(imported_name))
}
/// Return an [`Edit`] to reference an existing symbol, if it's present in the given [`SemanticModel`].
fn get_symbol(
&self,
symbol: &ImportRequest,
at: TextSize,
semantic: &SemanticModel,
) -> Result<Option<(Edit, String)>, ResolutionError> {
// Find the symbol in the current scope.
let Some(imported_name) = Self::find_symbol(symbol, at, semantic)? else {
return Ok(None);
};
// We also add a no-op edit to force conflicts with any other fixes that might try to
// remove the import. Consider:
//
@@ -307,13 +259,9 @@ impl<'a> Importer<'a> {
&self,
symbol: &ImportRequest,
at: TextSize,
except: Option<&Stmt>,
semantic: &SemanticModel,
) -> Result<(Edit, String), ResolutionError> {
if let Some(stmt) = self
.find_import_from(symbol.module, at)
.filter(|stmt| except != Some(stmt))
{
if let Some(stmt) = self.find_import_from(symbol.module, at) {
// Case 1: `from functools import lru_cache` is in scope, and we're trying to reference
// `functools.cache`; thus, we add `cache` to the import, and return `"cache"` as the
// bound name.
@@ -475,18 +423,14 @@ impl RuntimeImportEdit {
#[derive(Debug)]
pub(crate) struct TypingImportEdit {
/// The edit to add the `TYPE_CHECKING` symbol to the module.
type_checking_edit: Option<Edit>,
type_checking_edit: Edit,
/// The edit to add the import to a `TYPE_CHECKING` block.
add_import_edit: Edit,
}
impl TypingImportEdit {
pub(crate) fn into_edits(self) -> (Edit, Option<Edit>) {
if let Some(type_checking_edit) = self.type_checking_edit {
(type_checking_edit, Some(self.add_import_edit))
} else {
(self.add_import_edit, None)
}
pub(crate) fn into_edits(self) -> Vec<Edit> {
vec![self.type_checking_edit, self.add_import_edit]
}
}

View File

@@ -11,7 +11,7 @@ use rustc_hash::FxHashMap;
use ruff_diagnostics::Diagnostic;
use ruff_notebook::Notebook;
use ruff_python_ast::imports::ImportMap;
use ruff_python_ast::{PySourceType, Suite};
use ruff_python_ast::PySourceType;
use ruff_python_codegen::Stylist;
use ruff_python_index::Indexer;
use ruff_python_parser::lexer::LexResult;
@@ -73,6 +73,7 @@ pub struct FixerResult<'a> {
pub fn check_path(
path: &Path,
package: Option<&Path>,
tokens: Vec<LexResult>,
locator: &Locator,
stylist: &Stylist,
indexer: &Indexer,
@@ -81,7 +82,6 @@ pub fn check_path(
noqa: flags::Noqa,
source_kind: &SourceKind,
source_type: PySourceType,
tokens: TokenSource,
) -> LinterResult<(Vec<Diagnostic>, Option<ImportMap>)> {
// Aggregate all diagnostics.
let mut diagnostics = vec![];
@@ -144,8 +144,12 @@ pub fn check_path(
.iter_enabled()
.any(|rule_code| rule_code.lint_source().is_imports());
if use_ast || use_imports || use_doc_lines {
// Parse, if the AST wasn't pre-provided provided.
match tokens.into_ast_source(source_kind, source_type, path) {
match ruff_python_parser::parse_program_tokens(
tokens,
source_kind.source_code(),
&path.to_string_lossy(),
source_type.is_ipynb(),
) {
Ok(python_ast) => {
let cell_offsets = source_kind.as_ipy_notebook().map(Notebook::cell_offsets);
if use_ast {
@@ -321,6 +325,7 @@ pub fn add_noqa_to_path(
} = check_path(
path,
package,
tokens,
&locator,
&stylist,
&indexer,
@@ -329,7 +334,6 @@ pub fn add_noqa_to_path(
flags::Noqa::Disabled,
source_kind,
source_type,
TokenSource::Tokens(tokens),
);
// Log any parse errors.
@@ -361,10 +365,10 @@ pub fn lint_only(
noqa: flags::Noqa,
source_kind: &SourceKind,
source_type: PySourceType,
data: ParseSource,
) -> LinterResult<(Vec<Message>, Option<ImportMap>)> {
// Tokenize once.
let tokens = data.into_token_source(source_kind, source_type);
let tokens: Vec<LexResult> =
ruff_python_parser::tokenize(source_kind.source_code(), source_type.as_mode());
// Map row and column locations to byte slices (lazily).
let locator = Locator::new(source_kind.source_code());
@@ -387,6 +391,7 @@ pub fn lint_only(
let result = check_path(
path,
package,
tokens,
&locator,
&stylist,
&indexer,
@@ -395,7 +400,6 @@ pub fn lint_only(
noqa,
source_kind,
source_type,
tokens,
);
result.map(|(diagnostics, imports)| {
@@ -483,6 +487,7 @@ pub fn lint_fix<'a>(
let result = check_path(
path,
package,
tokens,
&locator,
&stylist,
&indexer,
@@ -491,7 +496,6 @@ pub fn lint_fix<'a>(
noqa,
&transformed,
source_type,
TokenSource::Tokens(tokens),
);
if iterations == 0 {
@@ -628,95 +632,6 @@ This indicates a bug in Ruff. If you could open an issue at:
}
}
#[derive(Debug, Clone)]
pub enum ParseSource<'a> {
/// Extract the tokens and AST from the given source code.
None,
/// Use the precomputed tokens and AST.
Precomputed {
tokens: &'a [LexResult],
ast: &'a Suite,
},
}
impl<'a> ParseSource<'a> {
/// Convert to a [`TokenSource`], tokenizing if necessary.
fn into_token_source(
self,
source_kind: &SourceKind,
source_type: PySourceType,
) -> TokenSource<'a> {
match self {
Self::None => TokenSource::Tokens(ruff_python_parser::tokenize(
source_kind.source_code(),
source_type.as_mode(),
)),
Self::Precomputed { tokens, ast } => TokenSource::Precomputed { tokens, ast },
}
}
}
#[derive(Debug, Clone)]
pub enum TokenSource<'a> {
/// Use the precomputed tokens to generate the AST.
Tokens(Vec<LexResult>),
/// Use the precomputed tokens and AST.
Precomputed {
tokens: &'a [LexResult],
ast: &'a Suite,
},
}
impl Deref for TokenSource<'_> {
type Target = [LexResult];
fn deref(&self) -> &Self::Target {
match self {
Self::Tokens(tokens) => tokens,
Self::Precomputed { tokens, .. } => tokens,
}
}
}
impl<'a> TokenSource<'a> {
/// Convert to an [`AstSource`], parsing if necessary.
fn into_ast_source(
self,
source_kind: &SourceKind,
source_type: PySourceType,
path: &Path,
) -> Result<AstSource<'a>, ParseError> {
match self {
Self::Tokens(tokens) => Ok(AstSource::Ast(ruff_python_parser::parse_program_tokens(
tokens,
source_kind.source_code(),
&path.to_string_lossy(),
source_type.is_ipynb(),
)?)),
Self::Precomputed { ast, .. } => Ok(AstSource::Precomputed(ast)),
}
}
}
#[derive(Debug, Clone)]
pub enum AstSource<'a> {
/// Extract the AST from the given source code.
Ast(Suite),
/// Use the precomputed AST.
Precomputed(&'a Suite),
}
impl Deref for AstSource<'_> {
type Target = Suite;
fn deref(&self) -> &Self::Target {
match self {
Self::Ast(ast) => ast,
Self::Precomputed(ast) => ast,
}
}
}
#[cfg(test)]
mod tests {
use std::path::Path;

View File

@@ -243,17 +243,6 @@ pub struct PreviewOptions {
pub require_explicit: bool,
}
impl PreviewOptions {
/// Return a copy with the same preview mode setting but require explicit disabled.
#[must_use]
pub fn without_require_explicit(&self) -> Self {
Self {
mode: self.mode,
require_explicit: false,
}
}
}
#[cfg(feature = "schemars")]
mod schema {
use itertools::Itertools;

View File

@@ -3,7 +3,7 @@ use rustc_hash::FxHashSet;
use ruff_diagnostics::Edit;
use ruff_python_ast::helpers::{
pep_604_union, typing_optional, typing_union, ReturnStatementVisitor, Terminal,
implicit_return, pep_604_union, typing_optional, typing_union, ReturnStatementVisitor,
};
use ruff_python_ast::visitor::Visitor;
use ruff_python_ast::{self as ast, Expr, ExprContext};
@@ -57,14 +57,6 @@ pub(crate) fn auto_return_type(function: &ast::StmtFunctionDef) -> Option<AutoPy
visitor.returns
};
// Determine the terminal behavior (i.e., implicit return, no return, etc.).
let terminal = Terminal::from_function(function);
// If every control flow path raises an exception, return `NoReturn`.
if terminal == Some(Terminal::Raise) {
return Some(AutoPythonType::Never);
}
// Determine the return type of the first `return` statement.
let Some((return_statement, returns)) = returns.split_first() else {
return Some(AutoPythonType::Atom(PythonType::None));
@@ -88,7 +80,7 @@ pub(crate) fn auto_return_type(function: &ast::StmtFunctionDef) -> Option<AutoPy
// if x > 0:
// return 1
// ```
if terminal.is_none() {
if implicit_return(function) {
return_type = return_type.union(ResolvedPythonType::Atom(PythonType::None));
}
@@ -102,7 +94,6 @@ pub(crate) fn auto_return_type(function: &ast::StmtFunctionDef) -> Option<AutoPy
#[derive(Debug)]
pub(crate) enum AutoPythonType {
Never,
Atom(PythonType),
Union(FxHashSet<PythonType>),
}
@@ -120,28 +111,6 @@ impl AutoPythonType {
target_version: PythonVersion,
) -> Option<(Expr, Vec<Edit>)> {
match self {
AutoPythonType::Never => {
let (no_return_edit, binding) = importer
.get_or_import_symbol(
&ImportRequest::import_from(
"typing",
if target_version >= PythonVersion::Py311 {
"Never"
} else {
"NoReturn"
},
),
at,
semantic,
)
.ok()?;
let expr = Expr::Name(ast::ExprName {
id: binding,
range: TextRange::default(),
ctx: ExprContext::Load,
});
Some((expr, vec![no_return_edit]))
}
AutoPythonType::Atom(python_type) => {
let expr = type_expr(python_type)?;
Some((expr, vec![]))

View File

@@ -482,6 +482,7 @@ impl Violation for AnyType {
format!("Dynamically typed expressions (typing.Any) are disallowed in `{name}`")
}
}
fn is_none_returning(body: &[Stmt]) -> bool {
let mut visitor = ReturnStatementVisitor::default();
visitor.visit_body(body);
@@ -536,41 +537,17 @@ fn check_dynamically_typed<F>(
}
}
/// Return `true` if a function appears to be a stub.
fn is_stub_function(function_def: &ast::StmtFunctionDef, checker: &Checker) -> bool {
/// Returns `true` if a function has an empty body.
fn is_empty_body(function_def: &ast::StmtFunctionDef) -> bool {
function_def.body.iter().all(|stmt| match stmt {
Stmt::Pass(_) => true,
Stmt::Expr(ast::StmtExpr { value, range: _ }) => {
matches!(
value.as_ref(),
Expr::StringLiteral(_) | Expr::EllipsisLiteral(_)
)
}
_ => false,
})
}
// Ignore functions with empty bodies in...
if is_empty_body(function_def) {
// Stub definitions (.pyi files)...
if checker.source_type.is_stub() {
return true;
fn is_empty_body(body: &[Stmt]) -> bool {
body.iter().all(|stmt| match stmt {
Stmt::Pass(_) => true,
Stmt::Expr(ast::StmtExpr { value, range: _ }) => {
matches!(
value.as_ref(),
Expr::StringLiteral(_) | Expr::EllipsisLiteral(_)
)
}
// Abstract methods...
if visibility::is_abstract(&function_def.decorator_list, checker.semantic()) {
return true;
}
// Overload definitions...
if visibility::is_overload(&function_def.decorator_list, checker.semantic()) {
return true;
}
}
false
_ => false,
})
}
/// Generate flake8-annotation checks for a given `Definition`.
@@ -761,7 +738,9 @@ pub(crate) fn definition(
) {
if is_method && visibility::is_classmethod(decorator_list, checker.semantic()) {
if checker.enabled(Rule::MissingReturnTypeClassMethod) {
let return_type = if is_stub_function(function, checker) {
let return_type = if visibility::is_abstract(decorator_list, checker.semantic())
&& is_empty_body(body)
{
None
} else {
auto_return_type(function)
@@ -792,7 +771,9 @@ pub(crate) fn definition(
}
} else if is_method && visibility::is_staticmethod(decorator_list, checker.semantic()) {
if checker.enabled(Rule::MissingReturnTypeStaticMethod) {
let return_type = if is_stub_function(function, checker) {
let return_type = if visibility::is_abstract(decorator_list, checker.semantic())
&& is_empty_body(body)
{
None
} else {
auto_return_type(function)
@@ -862,22 +843,25 @@ pub(crate) fn definition(
match visibility {
visibility::Visibility::Public => {
if checker.enabled(Rule::MissingReturnTypeUndocumentedPublicFunction) {
let return_type = if is_stub_function(function, checker) {
None
} else {
auto_return_type(function)
.and_then(|return_type| {
return_type.into_expression(
checker.importer(),
function.parameters.start(),
checker.semantic(),
checker.settings.target_version,
)
})
.map(|(return_type, edits)| {
(checker.generator().expr(&return_type), edits)
})
};
let return_type =
if visibility::is_abstract(decorator_list, checker.semantic())
&& is_empty_body(body)
{
None
} else {
auto_return_type(function)
.and_then(|return_type| {
return_type.into_expression(
checker.importer(),
function.parameters.start(),
checker.semantic(),
checker.settings.target_version,
)
})
.map(|(return_type, edits)| {
(checker.generator().expr(&return_type), edits)
})
};
let mut diagnostic = Diagnostic::new(
MissingReturnTypeUndocumentedPublicFunction {
name: name.to_string(),
@@ -901,22 +885,25 @@ pub(crate) fn definition(
}
visibility::Visibility::Private => {
if checker.enabled(Rule::MissingReturnTypePrivateFunction) {
let return_type = if is_stub_function(function, checker) {
None
} else {
auto_return_type(function)
.and_then(|return_type| {
return_type.into_expression(
checker.importer(),
function.parameters.start(),
checker.semantic(),
checker.settings.target_version,
)
})
.map(|(return_type, edits)| {
(checker.generator().expr(&return_type), edits)
})
};
let return_type =
if visibility::is_abstract(decorator_list, checker.semantic())
&& is_empty_body(body)
{
None
} else {
auto_return_type(function)
.and_then(|return_type| {
return_type.into_expression(
checker.importer(),
function.parameters.start(),
checker.semantic(),
checker.settings.target_version,
)
})
.map(|(return_type, edits)| {
(checker.generator().expr(&return_type), edits)
})
};
let mut diagnostic = Diagnostic::new(
MissingReturnTypePrivateFunction {
name: name.to_string(),

View File

@@ -495,88 +495,4 @@ auto_return_type.py:180:9: ANN201 [*] Missing return type annotation for public
182 182 | return 1
183 183 | else:
auto_return_type.py:187:5: ANN201 [*] Missing return type annotation for public function `func`
|
187 | def func(x: int):
| ^^^^ ANN201
188 | try:
189 | pass
|
= help: Add return type annotation: `int | None`
Unsafe fix
184 184 | return 1.5
185 185 |
186 186 |
187 |-def func(x: int):
187 |+def func(x: int) -> int | None:
188 188 | try:
189 189 | pass
190 190 | except:
auto_return_type.py:194:5: ANN201 [*] Missing return type annotation for public function `func`
|
194 | def func(x: int):
| ^^^^ ANN201
195 | try:
196 | pass
|
= help: Add return type annotation: `int`
Unsafe fix
191 191 | return 2
192 192 |
193 193 |
194 |-def func(x: int):
194 |+def func(x: int) -> int:
195 195 | try:
196 196 | pass
197 197 | except:
auto_return_type.py:203:5: ANN201 [*] Missing return type annotation for public function `func`
|
203 | def func(x: int):
| ^^^^ ANN201
204 | if not x:
205 | raise ValueError
|
= help: Add return type annotation: `Never`
Unsafe fix
151 151 |
152 152 | import abc
153 153 | from abc import abstractmethod
154 |+from typing import Never
154 155 |
155 156 |
156 157 | class Foo(abc.ABC):
--------------------------------------------------------------------------------
200 201 | return 3
201 202 |
202 203 |
203 |-def func(x: int):
204 |+def func(x: int) -> Never:
204 205 | if not x:
205 206 | raise ValueError
206 207 | else:
auto_return_type.py:210:5: ANN201 [*] Missing return type annotation for public function `func`
|
210 | def func(x: int):
| ^^^^ ANN201
211 | if not x:
212 | raise ValueError
|
= help: Add return type annotation: `int`
Unsafe fix
207 207 | raise TypeError
208 208 |
209 209 |
210 |-def func(x: int):
210 |+def func(x: int) -> int:
211 211 | if not x:
212 212 | raise ValueError
213 213 | else:

View File

@@ -550,96 +550,4 @@ auto_return_type.py:180:9: ANN201 [*] Missing return type annotation for public
182 182 | return 1
183 183 | else:
auto_return_type.py:187:5: ANN201 [*] Missing return type annotation for public function `func`
|
187 | def func(x: int):
| ^^^^ ANN201
188 | try:
189 | pass
|
= help: Add return type annotation: `Optional[int]`
Unsafe fix
151 151 |
152 152 | import abc
153 153 | from abc import abstractmethod
154 |+from typing import Optional
154 155 |
155 156 |
156 157 | class Foo(abc.ABC):
--------------------------------------------------------------------------------
184 185 | return 1.5
185 186 |
186 187 |
187 |-def func(x: int):
188 |+def func(x: int) -> Optional[int]:
188 189 | try:
189 190 | pass
190 191 | except:
auto_return_type.py:194:5: ANN201 [*] Missing return type annotation for public function `func`
|
194 | def func(x: int):
| ^^^^ ANN201
195 | try:
196 | pass
|
= help: Add return type annotation: `int`
Unsafe fix
191 191 | return 2
192 192 |
193 193 |
194 |-def func(x: int):
194 |+def func(x: int) -> int:
195 195 | try:
196 196 | pass
197 197 | except:
auto_return_type.py:203:5: ANN201 [*] Missing return type annotation for public function `func`
|
203 | def func(x: int):
| ^^^^ ANN201
204 | if not x:
205 | raise ValueError
|
= help: Add return type annotation: `NoReturn`
Unsafe fix
151 151 |
152 152 | import abc
153 153 | from abc import abstractmethod
154 |+from typing import NoReturn
154 155 |
155 156 |
156 157 | class Foo(abc.ABC):
--------------------------------------------------------------------------------
200 201 | return 3
201 202 |
202 203 |
203 |-def func(x: int):
204 |+def func(x: int) -> NoReturn:
204 205 | if not x:
205 206 | raise ValueError
206 207 | else:
auto_return_type.py:210:5: ANN201 [*] Missing return type annotation for public function `func`
|
210 | def func(x: int):
| ^^^^ ANN201
211 | if not x:
212 | raise ValueError
|
= help: Add return type annotation: `int`
Unsafe fix
207 207 | raise TypeError
208 208 |
209 209 |
210 |-def func(x: int):
210 |+def func(x: int) -> int:
211 211 | if not x:
212 212 | raise ValueError
213 213 | else:

View File

@@ -1,4 +1,4 @@
use ruff_diagnostics::{AlwaysFixableViolation, Applicability, Diagnostic, Fix};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::{self as ast, Arguments, Expr};
@@ -6,7 +6,6 @@ use ruff_python_semantic::SemanticModel;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::fix::edits::add_argument;
/// ## What it does
/// Checks for `zip` calls without an explicit `strict` parameter.
@@ -29,25 +28,16 @@ use crate::fix::edits::add_argument;
/// zip(a, b, strict=True)
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as unsafe for `zip` calls that contain
/// `**kwargs`, as adding a `check` keyword argument to such a call may lead
/// to a duplicate keyword argument error.
///
/// ## References
/// - [Python documentation: `zip`](https://docs.python.org/3/library/functions.html#zip)
#[violation]
pub struct ZipWithoutExplicitStrict;
impl AlwaysFixableViolation for ZipWithoutExplicitStrict {
impl Violation for ZipWithoutExplicitStrict {
#[derive_message_formats]
fn message(&self) -> String {
format!("`zip()` without an explicit `strict=` parameter")
}
fn fix_title(&self) -> String {
"Add explicit `strict=False`".to_string()
}
}
/// B905
@@ -62,27 +52,9 @@ pub(crate) fn zip_without_explicit_strict(checker: &mut Checker, call: &ast::Exp
.iter()
.any(|arg| is_infinite_iterator(arg, checker.semantic()))
{
let mut diagnostic = Diagnostic::new(ZipWithoutExplicitStrict, call.range());
diagnostic.set_fix(Fix::applicable_edit(
add_argument(
"strict=False",
&call.arguments,
checker.indexer().comment_ranges(),
checker.locator().contents(),
),
// If the function call contains `**kwargs`, mark the fix as unsafe.
if call
.arguments
.keywords
.iter()
.any(|keyword| keyword.arg.is_none())
{
Applicability::Unsafe
} else {
Applicability::Safe
},
));
checker.diagnostics.push(diagnostic);
checker
.diagnostics
.push(Diagnostic::new(ZipWithoutExplicitStrict, call.range()));
}
}
}

View File

@@ -1,7 +1,7 @@
---
source: crates/ruff_linter/src/rules/flake8_bugbear/mod.rs
---
B905.py:4:1: B905 [*] `zip()` without an explicit `strict=` parameter
B905.py:4:1: B905 `zip()` without an explicit `strict=` parameter
|
3 | # Errors
4 | zip()
@@ -9,19 +9,8 @@ B905.py:4:1: B905 [*] `zip()` without an explicit `strict=` parameter
5 | zip(range(3))
6 | zip("a", "b")
|
= help: Add explicit `strict=False`
Safe fix
1 1 | from itertools import count, cycle, repeat
2 2 |
3 3 | # Errors
4 |-zip()
4 |+zip(strict=False)
5 5 | zip(range(3))
6 6 | zip("a", "b")
7 7 | zip("a", "b", *zip("c"))
B905.py:5:1: B905 [*] `zip()` without an explicit `strict=` parameter
B905.py:5:1: B905 `zip()` without an explicit `strict=` parameter
|
3 | # Errors
4 | zip()
@@ -30,19 +19,8 @@ B905.py:5:1: B905 [*] `zip()` without an explicit `strict=` parameter
6 | zip("a", "b")
7 | zip("a", "b", *zip("c"))
|
= help: Add explicit `strict=False`
Safe fix
2 2 |
3 3 | # Errors
4 4 | zip()
5 |-zip(range(3))
5 |+zip(range(3), strict=False)
6 6 | zip("a", "b")
7 7 | zip("a", "b", *zip("c"))
8 8 | zip(zip("a"), strict=False)
B905.py:6:1: B905 [*] `zip()` without an explicit `strict=` parameter
B905.py:6:1: B905 `zip()` without an explicit `strict=` parameter
|
4 | zip()
5 | zip(range(3))
@@ -51,19 +29,8 @@ B905.py:6:1: B905 [*] `zip()` without an explicit `strict=` parameter
7 | zip("a", "b", *zip("c"))
8 | zip(zip("a"), strict=False)
|
= help: Add explicit `strict=False`
Safe fix
3 3 | # Errors
4 4 | zip()
5 5 | zip(range(3))
6 |-zip("a", "b")
6 |+zip("a", "b", strict=False)
7 7 | zip("a", "b", *zip("c"))
8 8 | zip(zip("a"), strict=False)
9 9 | zip(zip("a", strict=True))
B905.py:7:1: B905 [*] `zip()` without an explicit `strict=` parameter
B905.py:7:1: B905 `zip()` without an explicit `strict=` parameter
|
5 | zip(range(3))
6 | zip("a", "b")
@@ -72,19 +39,8 @@ B905.py:7:1: B905 [*] `zip()` without an explicit `strict=` parameter
8 | zip(zip("a"), strict=False)
9 | zip(zip("a", strict=True))
|
= help: Add explicit `strict=False`
Safe fix
4 4 | zip()
5 5 | zip(range(3))
6 6 | zip("a", "b")
7 |-zip("a", "b", *zip("c"))
7 |+zip("a", "b", *zip("c"), strict=False)
8 8 | zip(zip("a"), strict=False)
9 9 | zip(zip("a", strict=True))
10 10 |
B905.py:7:16: B905 [*] `zip()` without an explicit `strict=` parameter
B905.py:7:16: B905 `zip()` without an explicit `strict=` parameter
|
5 | zip(range(3))
6 | zip("a", "b")
@@ -93,19 +49,8 @@ B905.py:7:16: B905 [*] `zip()` without an explicit `strict=` parameter
8 | zip(zip("a"), strict=False)
9 | zip(zip("a", strict=True))
|
= help: Add explicit `strict=False`
Safe fix
4 4 | zip()
5 5 | zip(range(3))
6 6 | zip("a", "b")
7 |-zip("a", "b", *zip("c"))
7 |+zip("a", "b", *zip("c", strict=False))
8 8 | zip(zip("a"), strict=False)
9 9 | zip(zip("a", strict=True))
10 10 |
B905.py:8:5: B905 [*] `zip()` without an explicit `strict=` parameter
B905.py:8:5: B905 `zip()` without an explicit `strict=` parameter
|
6 | zip("a", "b")
7 | zip("a", "b", *zip("c"))
@@ -113,19 +58,8 @@ B905.py:8:5: B905 [*] `zip()` without an explicit `strict=` parameter
| ^^^^^^^^ B905
9 | zip(zip("a", strict=True))
|
= help: Add explicit `strict=False`
Safe fix
5 5 | zip(range(3))
6 6 | zip("a", "b")
7 7 | zip("a", "b", *zip("c"))
8 |-zip(zip("a"), strict=False)
8 |+zip(zip("a", strict=False), strict=False)
9 9 | zip(zip("a", strict=True))
10 10 |
11 11 | # OK
B905.py:9:1: B905 [*] `zip()` without an explicit `strict=` parameter
B905.py:9:1: B905 `zip()` without an explicit `strict=` parameter
|
7 | zip("a", "b", *zip("c"))
8 | zip(zip("a"), strict=False)
@@ -134,49 +68,21 @@ B905.py:9:1: B905 [*] `zip()` without an explicit `strict=` parameter
10 |
11 | # OK
|
= help: Add explicit `strict=False`
Safe fix
6 6 | zip("a", "b")
7 7 | zip("a", "b", *zip("c"))
8 8 | zip(zip("a"), strict=False)
9 |-zip(zip("a", strict=True))
9 |+zip(zip("a", strict=True), strict=False)
10 10 |
11 11 | # OK
12 12 | zip(range(3), strict=True)
B905.py:24:1: B905 [*] `zip()` without an explicit `strict=` parameter
B905.py:24:1: B905 `zip()` without an explicit `strict=` parameter
|
23 | # Errors (limited iterators).
24 | zip([1, 2, 3], repeat(1, 1))
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ B905
25 | zip([1, 2, 3], repeat(1, times=4))
|
= help: Add explicit `strict=False`
Safe fix
21 21 | zip([1, 2, 3], repeat(1, times=None))
22 22 |
23 23 | # Errors (limited iterators).
24 |-zip([1, 2, 3], repeat(1, 1))
24 |+zip([1, 2, 3], repeat(1, 1), strict=False)
25 25 | zip([1, 2, 3], repeat(1, times=4))
B905.py:25:1: B905 [*] `zip()` without an explicit `strict=` parameter
B905.py:25:1: B905 `zip()` without an explicit `strict=` parameter
|
23 | # Errors (limited iterators).
24 | zip([1, 2, 3], repeat(1, 1))
25 | zip([1, 2, 3], repeat(1, times=4))
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ B905
|
= help: Add explicit `strict=False`
Safe fix
22 22 |
23 23 | # Errors (limited iterators).
24 24 | zip([1, 2, 3], repeat(1, 1))
25 |-zip([1, 2, 3], repeat(1, times=4))
25 |+zip([1, 2, 3], repeat(1, times=4), strict=False)

View File

@@ -1,6 +1,7 @@
use ruff_python_ast::{self as ast, Arguments, Expr, Stmt};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::{self as ast, Expr, Stmt};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
@@ -47,12 +48,21 @@ impl Violation for DjangoAllWithModelForm {
}
/// DJ007
pub(crate) fn all_with_model_form(checker: &mut Checker, class_def: &ast::StmtClassDef) {
if !is_model_form(class_def, checker.semantic()) {
return;
pub(crate) fn all_with_model_form(
checker: &Checker,
arguments: Option<&Arguments>,
body: &[Stmt],
) -> Option<Diagnostic> {
if !arguments.is_some_and(|arguments| {
arguments
.args
.iter()
.any(|base| is_model_form(base, checker.semantic()))
}) {
return None;
}
for element in &class_def.body {
for element in body {
let Stmt::ClassDef(ast::StmtClassDef { name, body, .. }) = element else {
continue;
};
@@ -73,18 +83,12 @@ pub(crate) fn all_with_model_form(checker: &mut Checker, class_def: &ast::StmtCl
match value.as_ref() {
Expr::StringLiteral(ast::ExprStringLiteral { value, .. }) => {
if value == "__all__" {
checker
.diagnostics
.push(Diagnostic::new(DjangoAllWithModelForm, element.range()));
return;
return Some(Diagnostic::new(DjangoAllWithModelForm, element.range()));
}
}
Expr::BytesLiteral(ast::ExprBytesLiteral { value, .. }) => {
if value == "__all__".as_bytes() {
checker
.diagnostics
.push(Diagnostic::new(DjangoAllWithModelForm, element.range()));
return;
return Some(Diagnostic::new(DjangoAllWithModelForm, element.range()));
}
}
_ => (),
@@ -92,4 +96,5 @@ pub(crate) fn all_with_model_form(checker: &mut Checker, class_def: &ast::StmtCl
}
}
}
None
}

View File

@@ -1,6 +1,7 @@
use ruff_python_ast::{self as ast, Arguments, Expr, Stmt};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::{self as ast, Expr, Stmt};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
@@ -45,12 +46,21 @@ impl Violation for DjangoExcludeWithModelForm {
}
/// DJ006
pub(crate) fn exclude_with_model_form(checker: &mut Checker, class_def: &ast::StmtClassDef) {
if !is_model_form(class_def, checker.semantic()) {
return;
pub(crate) fn exclude_with_model_form(
checker: &Checker,
arguments: Option<&Arguments>,
body: &[Stmt],
) -> Option<Diagnostic> {
if !arguments.is_some_and(|arguments| {
arguments
.args
.iter()
.any(|base| is_model_form(base, checker.semantic()))
}) {
return None;
}
for element in &class_def.body {
for element in body {
let Stmt::ClassDef(ast::StmtClassDef { name, body, .. }) = element else {
continue;
};
@@ -66,12 +76,10 @@ pub(crate) fn exclude_with_model_form(checker: &mut Checker, class_def: &ast::St
continue;
};
if id == "exclude" {
checker
.diagnostics
.push(Diagnostic::new(DjangoExcludeWithModelForm, target.range()));
return;
return Some(Diagnostic::new(DjangoExcludeWithModelForm, target.range()));
}
}
}
}
None
}

View File

@@ -1,17 +1,17 @@
use ruff_python_ast::{self as ast, Expr};
use ruff_python_ast::Expr;
use ruff_python_semantic::{analyze, SemanticModel};
use ruff_python_semantic::SemanticModel;
/// Return `true` if a Python class appears to be a Django model, based on its base classes.
pub(super) fn is_model(class_def: &ast::StmtClassDef, semantic: &SemanticModel) -> bool {
analyze::class::any_over_body(class_def, semantic, &|call_path| {
pub(super) fn is_model(base: &Expr, semantic: &SemanticModel) -> bool {
semantic.resolve_call_path(base).is_some_and(|call_path| {
matches!(call_path.as_slice(), ["django", "db", "models", "Model"])
})
}
/// Return `true` if a Python class appears to be a Django model form, based on its base classes.
pub(super) fn is_model_form(class_def: &ast::StmtClassDef, semantic: &SemanticModel) -> bool {
analyze::class::any_over_body(class_def, semantic, &|call_path| {
pub(super) fn is_model_form(base: &Expr, semantic: &SemanticModel) -> bool {
semantic.resolve_call_path(base).is_some_and(|call_path| {
matches!(
call_path.as_slice(),
["django", "forms", "ModelForm"] | ["django", "forms", "models", "ModelForm"]

View File

@@ -1,9 +1,10 @@
use ruff_python_ast::{self as ast, Arguments, Expr, Stmt};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::is_const_true;
use ruff_python_ast::identifier::Identifier;
use ruff_python_ast::{self as ast, Expr, Stmt};
use ruff_python_semantic::SemanticModel;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
@@ -51,39 +52,57 @@ impl Violation for DjangoModelWithoutDunderStr {
}
/// DJ008
pub(crate) fn model_without_dunder_str(checker: &mut Checker, class_def: &ast::StmtClassDef) {
if !is_non_abstract_model(class_def, checker.semantic()) {
pub(crate) fn model_without_dunder_str(
checker: &mut Checker,
ast::StmtClassDef {
name,
arguments,
body,
..
}: &ast::StmtClassDef,
) {
if !is_non_abstract_model(arguments.as_deref(), body, checker.semantic()) {
return;
}
if has_dunder_method(class_def) {
if has_dunder_method(body) {
return;
}
checker.diagnostics.push(Diagnostic::new(
DjangoModelWithoutDunderStr,
class_def.identifier(),
));
checker
.diagnostics
.push(Diagnostic::new(DjangoModelWithoutDunderStr, name.range()));
}
/// Returns `true` if the class has `__str__` method.
fn has_dunder_method(class_def: &ast::StmtClassDef) -> bool {
class_def.body.iter().any(|val| match val {
Stmt::FunctionDef(ast::StmtFunctionDef { name, .. }) => name == "__str__",
fn has_dunder_method(body: &[Stmt]) -> bool {
body.iter().any(|val| match val {
Stmt::FunctionDef(ast::StmtFunctionDef { name, .. }) => {
if name == "__str__" {
return true;
}
false
}
_ => false,
})
}
/// Returns `true` if the class is a non-abstract Django model.
fn is_non_abstract_model(class_def: &ast::StmtClassDef, semantic: &SemanticModel) -> bool {
if class_def.bases().is_empty() || is_model_abstract(class_def) {
false
} else {
helpers::is_model(class_def, semantic)
fn is_non_abstract_model(
arguments: Option<&Arguments>,
body: &[Stmt],
semantic: &SemanticModel,
) -> bool {
let Some(Arguments { args: bases, .. }) = arguments else {
return false;
};
if is_model_abstract(body) {
return false;
}
bases.iter().any(|base| helpers::is_model(base, semantic))
}
/// Check if class is abstract, in terms of Django model inheritance.
fn is_model_abstract(class_def: &ast::StmtClassDef) -> bool {
for element in &class_def.body {
fn is_model_abstract(body: &[Stmt]) -> bool {
for element in body {
let Stmt::ClassDef(ast::StmtClassDef { name, body, .. }) = element else {
continue;
};

View File

@@ -1,8 +1,9 @@
use std::fmt;
use ruff_python_ast::{self as ast, Arguments, Expr, Stmt};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::{self as ast, Expr, Stmt};
use ruff_python_semantic::SemanticModel;
use ruff_text_size::Ranged;
@@ -78,50 +79,6 @@ impl Violation for DjangoUnorderedBodyContentInModel {
}
}
/// DJ012
pub(crate) fn unordered_body_content_in_model(
checker: &mut Checker,
class_def: &ast::StmtClassDef,
) {
if !helpers::is_model(class_def, checker.semantic()) {
return;
}
// Track all the element types we've seen so far.
let mut element_types = Vec::new();
let mut prev_element_type = None;
for element in &class_def.body {
let Some(element_type) = get_element_type(element, checker.semantic()) else {
continue;
};
// Skip consecutive elements of the same type. It's less noisy to only report
// violations at type boundaries (e.g., avoid raising a violation for _every_
// field declaration that's out of order).
if prev_element_type == Some(element_type) {
continue;
}
prev_element_type = Some(element_type);
if let Some(&prev_element_type) = element_types
.iter()
.find(|&&prev_element_type| prev_element_type > element_type)
{
let diagnostic = Diagnostic::new(
DjangoUnorderedBodyContentInModel {
element_type,
prev_element_type,
},
element.range(),
);
checker.diagnostics.push(diagnostic);
} else {
element_types.push(element_type);
}
}
}
#[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq)]
enum ContentType {
FieldDeclaration,
@@ -183,3 +140,53 @@ fn get_element_type(element: &Stmt, semantic: &SemanticModel) -> Option<ContentT
_ => None,
}
}
/// DJ012
pub(crate) fn unordered_body_content_in_model(
checker: &mut Checker,
arguments: Option<&Arguments>,
body: &[Stmt],
) {
if !arguments.is_some_and(|arguments| {
arguments
.args
.iter()
.any(|base| helpers::is_model(base, checker.semantic()))
}) {
return;
}
// Track all the element types we've seen so far.
let mut element_types = Vec::new();
let mut prev_element_type = None;
for element in body {
let Some(element_type) = get_element_type(element, checker.semantic()) else {
continue;
};
// Skip consecutive elements of the same type. It's less noisy to only report
// violations at type boundaries (e.g., avoid raising a violation for _every_
// field declaration that's out of order).
if prev_element_type == Some(element_type) {
continue;
}
prev_element_type = Some(element_type);
if let Some(&prev_element_type) = element_types
.iter()
.find(|&&prev_element_type| prev_element_type > element_type)
{
let diagnostic = Diagnostic::new(
DjangoUnorderedBodyContentInModel {
element_type,
prev_element_type,
},
element.range(),
);
checker.diagnostics.push(diagnostic);
} else {
element_types.push(element_type);
}
}
}

View File

@@ -54,12 +54,4 @@ DJ012.py:129:5: DJ012 Order of model's inner classes, methods, and fields does n
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ DJ012
|
DJ012.py:146:5: DJ012 Order of model's inner classes, methods, and fields does not follow the Django Style Guide: field declaration should come before `Meta` class
|
144 | return "foobar"
145 |
146 | first_name = models.CharField(max_length=32)
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ DJ012
|

View File

@@ -1,10 +1,10 @@
use ruff_diagnostics::{Diagnostic, Fix, FixAvailability, Violation};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast as ast;
use ruff_python_ast::{self as ast};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::fix::edits::delete_stmt;
use crate::registry::AsRule;
/// ## What it does
@@ -28,24 +28,14 @@ use crate::registry::AsRule;
/// def add_numbers(a, b):
/// return a + b
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as unsafe, as it may remove `print` statements
/// that are used beyond debugging purposes.
#[violation]
pub struct Print;
impl Violation for Print {
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
#[derive_message_formats]
fn message(&self) -> String {
format!("`print` found")
}
fn fix_title(&self) -> Option<String> {
Some("Remove `print`".to_string())
}
}
/// ## What it does
@@ -75,29 +65,19 @@ impl Violation for Print {
/// dict_c = {**dict_a, **dict_b}
/// return dict_c
/// ```
///
/// ## Fix safety
/// This rule's fix is marked as unsafe, as it may remove `pprint` statements
/// that are used beyond debugging purposes.
#[violation]
pub struct PPrint;
impl Violation for PPrint {
const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;
#[derive_message_formats]
fn message(&self) -> String {
format!("`pprint` found")
}
fn fix_title(&self) -> Option<String> {
Some("Remove `pprint`".to_string())
}
}
/// T201, T203
pub(crate) fn print_call(checker: &mut Checker, call: &ast::ExprCall) {
let mut diagnostic = {
let diagnostic = {
let call_path = checker.semantic().resolve_call_path(&call.func);
if call_path
.as_ref()
@@ -133,15 +113,5 @@ pub(crate) fn print_call(checker: &mut Checker, call: &ast::ExprCall) {
return;
}
// Remove the `print`, if it's a standalone statement.
if checker.semantic().current_expression_parent().is_none() {
let statement = checker.semantic().current_statement();
let parent = checker.semantic().current_statement_parent();
let edit = delete_stmt(statement, parent, checker.locator(), checker.indexer());
diagnostic.set_fix(Fix::unsafe_edit(edit).isolate(Checker::isolation(
checker.semantic().current_statement_parent_id(),
)));
}
checker.diagnostics.push(diagnostic);
}

View File

@@ -1,7 +1,7 @@
---
source: crates/ruff_linter/src/rules/flake8_print/mod.rs
---
T201.py:4:1: T201 [*] `print` found
T201.py:4:1: T201 `print` found
|
2 | import tempfile
3 |
@@ -10,18 +10,8 @@ T201.py:4:1: T201 [*] `print` found
5 | print("Hello, world!", file=None) # T201
6 | print("Hello, world!", file=sys.stdout) # T201
|
= help: Remove `print`
Unsafe fix
1 1 | import sys
2 2 | import tempfile
3 3 |
4 |-print("Hello, world!") # T201
5 4 | print("Hello, world!", file=None) # T201
6 5 | print("Hello, world!", file=sys.stdout) # T201
7 6 | print("Hello, world!", file=sys.stderr) # T201
T201.py:5:1: T201 [*] `print` found
T201.py:5:1: T201 `print` found
|
4 | print("Hello, world!") # T201
5 | print("Hello, world!", file=None) # T201
@@ -29,18 +19,8 @@ T201.py:5:1: T201 [*] `print` found
6 | print("Hello, world!", file=sys.stdout) # T201
7 | print("Hello, world!", file=sys.stderr) # T201
|
= help: Remove `print`
Unsafe fix
2 2 | import tempfile
3 3 |
4 4 | print("Hello, world!") # T201
5 |-print("Hello, world!", file=None) # T201
6 5 | print("Hello, world!", file=sys.stdout) # T201
7 6 | print("Hello, world!", file=sys.stderr) # T201
8 7 |
T201.py:6:1: T201 [*] `print` found
T201.py:6:1: T201 `print` found
|
4 | print("Hello, world!") # T201
5 | print("Hello, world!", file=None) # T201
@@ -48,18 +28,8 @@ T201.py:6:1: T201 [*] `print` found
| ^^^^^ T201
7 | print("Hello, world!", file=sys.stderr) # T201
|
= help: Remove `print`
Unsafe fix
3 3 |
4 4 | print("Hello, world!") # T201
5 5 | print("Hello, world!", file=None) # T201
6 |-print("Hello, world!", file=sys.stdout) # T201
7 6 | print("Hello, world!", file=sys.stderr) # T201
8 7 |
9 8 | with tempfile.NamedTemporaryFile() as fp:
T201.py:7:1: T201 [*] `print` found
T201.py:7:1: T201 `print` found
|
5 | print("Hello, world!", file=None) # T201
6 | print("Hello, world!", file=sys.stdout) # T201
@@ -68,15 +38,5 @@ T201.py:7:1: T201 [*] `print` found
8 |
9 | with tempfile.NamedTemporaryFile() as fp:
|
= help: Remove `print`
Unsafe fix
4 4 | print("Hello, world!") # T201
5 5 | print("Hello, world!", file=None) # T201
6 6 | print("Hello, world!", file=sys.stdout) # T201
7 |-print("Hello, world!", file=sys.stderr) # T201
8 7 |
9 8 | with tempfile.NamedTemporaryFile() as fp:
10 9 | print("Hello, world!", file=fp) # OK

View File

@@ -1,7 +1,7 @@
---
source: crates/ruff_linter/src/rules/flake8_print/mod.rs
---
T203.py:3:1: T203 [*] `pprint` found
T203.py:3:1: T203 `pprint` found
|
1 | from pprint import pprint
2 |
@@ -10,17 +10,8 @@ T203.py:3:1: T203 [*] `pprint` found
4 |
5 | import pprint
|
= help: Remove `pprint`
Unsafe fix
1 1 | from pprint import pprint
2 2 |
3 |-pprint("Hello, world!") # T203
4 3 |
5 4 | import pprint
6 5 |
T203.py:7:1: T203 [*] `pprint` found
T203.py:7:1: T203 `pprint` found
|
5 | import pprint
6 |
@@ -29,14 +20,5 @@ T203.py:7:1: T203 [*] `pprint` found
8 |
9 | pprint.pformat("Hello, world!")
|
= help: Remove `pprint`
Unsafe fix
4 4 |
5 5 | import pprint
6 6 |
7 |-pprint.pprint("Hello, world!") # T203
8 7 |
9 8 | pprint.pformat("Hello, world!")

View File

@@ -0,0 +1,54 @@
use ruff_python_ast::{self as ast, Expr, Operator};
use ruff_python_semantic::SemanticModel;
/// Traverse a "union" type annotation, applying `func` to each union member.
/// Supports traversal of `Union` and `|` union expressions.
/// The function is called with each expression in the union (excluding declarations of nested unions)
/// and the parent expression (if any).
pub(super) fn traverse_union<'a, F>(
func: &mut F,
semantic: &SemanticModel,
expr: &'a Expr,
parent: Option<&'a Expr>,
) where
F: FnMut(&'a Expr, Option<&'a Expr>),
{
// Ex) x | y
if let Expr::BinOp(ast::ExprBinOp {
op: Operator::BitOr,
left,
right,
range: _,
}) = expr
{
// The union data structure usually looks like this:
// a | b | c -> (a | b) | c
//
// However, parenthesized expressions can coerce it into any structure:
// a | (b | c)
//
// So we have to traverse both branches in order (left, then right), to report members
// in the order they appear in the source code.
// Traverse the left then right arms
traverse_union(func, semantic, left, Some(expr));
traverse_union(func, semantic, right, Some(expr));
return;
}
// Ex) Union[x, y]
if let Expr::Subscript(ast::ExprSubscript { value, slice, .. }) = expr {
if semantic.match_typing_expr(value, "Union") {
if let Expr::Tuple(ast::ExprTuple { elts, .. }) = slice.as_ref() {
// Traverse each element of the tuple within the union recursively to handle cases
// such as `Union[..., Union[...]]
elts.iter()
.for_each(|elt| traverse_union(func, semantic, elt, Some(expr)));
return;
}
}
}
// Otherwise, call the function on expression
func(expr, parent);
}

View File

@@ -1,4 +1,5 @@
//! Rules from [flake8-pyi](https://pypi.org/project/flake8-pyi/).
mod helpers;
pub(crate) mod rules;
#[cfg(test)]

View File

@@ -1,16 +1,15 @@
use ruff_python_ast::{self as ast, Expr};
use rustc_hash::FxHashSet;
use std::collections::HashSet;
use rustc_hash::FxHashSet;
use crate::checkers::ast::Checker;
use crate::rules::flake8_pyi::helpers::traverse_union;
use ruff_diagnostics::{Diagnostic, Edit, Fix, FixAvailability, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::comparable::ComparableExpr;
use ruff_python_ast::{self as ast, Expr};
use ruff_python_semantic::analyze::typing::traverse_union;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for duplicate union members.
///
@@ -56,7 +55,7 @@ pub(crate) fn duplicate_union_member<'a>(checker: &mut Checker, expr: &'a Expr)
let mut diagnostics: Vec<Diagnostic> = Vec::new();
// Adds a member to `literal_exprs` if it is a `Literal` annotation
let mut check_for_duplicate_members = |expr: &'a Expr, parent: &'a Expr| {
let mut check_for_duplicate_members = |expr: &'a Expr, parent: Option<&'a Expr>| {
// If we've already seen this union member, raise a violation.
if !seen_nodes.insert(expr.into()) {
let mut diagnostic = Diagnostic::new(
@@ -69,7 +68,7 @@ pub(crate) fn duplicate_union_member<'a>(checker: &mut Checker, expr: &'a Expr)
// parent without the duplicate.
// If the parent node is not a `BinOp` we will not perform a fix
if let Expr::BinOp(ast::ExprBinOp { left, right, .. }) = parent {
if let Some(parent @ Expr::BinOp(ast::ExprBinOp { left, right, .. })) = parent {
// Replace the parent with its non-duplicate child.
let child = if expr == left.as_ref() { right } else { left };
diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(
@@ -82,7 +81,12 @@ pub(crate) fn duplicate_union_member<'a>(checker: &mut Checker, expr: &'a Expr)
};
// Traverse the union, collect all diagnostic members
traverse_union(&mut check_for_duplicate_members, checker.semantic(), expr);
traverse_union(
&mut check_for_duplicate_members,
checker.semantic(),
expr,
None,
);
// Add all diagnostics to the checker
checker.diagnostics.append(&mut diagnostics);

View File

@@ -1,16 +1,14 @@
use std::fmt;
use rustc_hash::FxHashSet;
use std::fmt;
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::{self as ast, Expr, LiteralExpressionRef};
use ruff_python_semantic::analyze::typing::traverse_union;
use ruff_python_semantic::SemanticModel;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::fix::snippet::SourceCodeSnippet;
use crate::{checkers::ast::Checker, rules::flake8_pyi::helpers::traverse_union};
/// ## What it does
/// Checks for the presence of redundant `Literal` types and builtin super
@@ -66,7 +64,7 @@ pub(crate) fn redundant_literal_union<'a>(checker: &mut Checker, union: &'a Expr
// Adds a member to `literal_exprs` for each value in a `Literal`, and any builtin types
// to `builtin_types_in_union`.
let mut func = |expr: &'a Expr, _parent: &'a Expr| {
let mut func = |expr: &'a Expr, _| {
if let Expr::Subscript(ast::ExprSubscript { value, slice, .. }) = expr {
if checker.semantic().match_typing_expr(value, "Literal") {
if let Expr::Tuple(ast::ExprTuple { elts, .. }) = slice.as_ref() {
@@ -84,7 +82,7 @@ pub(crate) fn redundant_literal_union<'a>(checker: &mut Checker, union: &'a Expr
builtin_types_in_union.insert(builtin_type);
};
traverse_union(&mut func, checker.semantic(), union);
traverse_union(&mut func, checker.semantic(), union, None);
for typing_literal_expr in typing_literal_exprs {
let Some(literal_type) = match_literal_type(typing_literal_expr) else {

View File

@@ -1,10 +1,11 @@
use ruff_python_ast::{Expr, Parameters};
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::{Expr, Parameters};
use ruff_python_semantic::analyze::typing::traverse_union;
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
use crate::rules::flake8_pyi::helpers::traverse_union;
/// ## What it does
/// Checks for union annotations that contain redundant numeric types (e.g.,
@@ -89,7 +90,7 @@ fn check_annotation(checker: &mut Checker, annotation: &Expr) {
let mut has_complex = false;
let mut has_int = false;
let mut func = |expr: &Expr, _parent: &Expr| {
let mut func = |expr: &Expr, _parent: Option<&Expr>| {
let Some(call_path) = checker.semantic().resolve_call_path(expr) else {
return;
};
@@ -102,7 +103,7 @@ fn check_annotation(checker: &mut Checker, annotation: &Expr) {
}
};
traverse_union(&mut func, checker.semantic(), annotation);
traverse_union(&mut func, checker.semantic(), annotation, None);
if has_complex {
if has_float {

View File

@@ -1,12 +1,13 @@
use ast::Operator;
use ast::{ExprSubscript, Operator};
use ruff_diagnostics::{Diagnostic, Edit, Fix, FixAvailability, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::{self as ast, Expr};
use ruff_python_semantic::analyze::typing::traverse_union;
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::Checker;
use crate::rules::flake8_pyi::helpers::traverse_union;
/// ## What it does
/// Checks for the presence of multiple literal types in a union.
///
@@ -61,7 +62,7 @@ fn concatenate_bin_ors(exprs: Vec<&Expr>) -> Expr {
})
}
fn make_union(subscript: &ast::ExprSubscript, exprs: Vec<&Expr>) -> Expr {
fn make_union(subscript: &ExprSubscript, exprs: Vec<&Expr>) -> Expr {
Expr::Subscript(ast::ExprSubscript {
value: subscript.value.clone(),
slice: Box::new(Expr::Tuple(ast::ExprTuple {
@@ -107,7 +108,7 @@ pub(crate) fn unnecessary_literal_union<'a>(checker: &mut Checker, expr: &'a Exp
let mut total_literals = 0;
// Split members into `literal_exprs` if they are a `Literal` annotation and `other_exprs` otherwise
let mut collect_literal_expr = |expr: &'a Expr, _parent: &'a Expr| {
let mut collect_literal_expr = |expr: &'a Expr, _| {
if let Expr::Subscript(ast::ExprSubscript { value, slice, .. }) = expr {
if checker.semantic().match_typing_expr(value, "Literal") {
total_literals += 1;
@@ -136,7 +137,7 @@ pub(crate) fn unnecessary_literal_union<'a>(checker: &mut Checker, expr: &'a Exp
};
// Traverse the union, collect all members, split out the literals from the rest.
traverse_union(&mut collect_literal_expr, checker.semantic(), expr);
traverse_union(&mut collect_literal_expr, checker.semantic(), expr, None);
let union_subscript = expr.as_subscript_expr();
if union_subscript.is_some_and(|subscript| {

View File

@@ -2,10 +2,9 @@ use ast::{ExprContext, Operator};
use ruff_diagnostics::{Diagnostic, Edit, Fix, FixAvailability, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::{self as ast, Expr};
use ruff_python_semantic::analyze::typing::traverse_union;
use ruff_text_size::{Ranged, TextRange};
use crate::checkers::ast::Checker;
use crate::{checkers::ast::Checker, rules::flake8_pyi::helpers::traverse_union};
/// ## What it does
/// Checks for the presence of multiple `type`s in a union.
@@ -81,28 +80,21 @@ pub(crate) fn unnecessary_type_union<'a>(checker: &mut Checker, union: &'a Expr)
}
let mut type_exprs = Vec::new();
let mut other_exprs = Vec::new();
let mut collect_type_exprs = |expr: &'a Expr, _parent: &'a Expr| {
let subscript = expr.as_subscript_expr();
if subscript.is_none() {
other_exprs.push(expr);
} else {
let unwrapped = subscript.unwrap();
if checker
.semantic()
.resolve_call_path(unwrapped.value.as_ref())
.is_some_and(|call_path| matches!(call_path.as_slice(), ["" | "builtins", "type"]))
{
type_exprs.push(&unwrapped.slice);
} else {
other_exprs.push(expr);
}
let mut collect_type_exprs = |expr: &'a Expr, _| {
let Some(subscript) = expr.as_subscript_expr() else {
return;
};
if checker
.semantic()
.resolve_call_path(subscript.value.as_ref())
.is_some_and(|call_path| matches!(call_path.as_slice(), ["" | "builtins", "type"]))
{
type_exprs.push(&subscript.slice);
}
};
traverse_union(&mut collect_type_exprs, checker.semantic(), union);
traverse_union(&mut collect_type_exprs, checker.semantic(), union, None);
if type_exprs.len() > 1 {
let type_members: Vec<String> = type_exprs
@@ -121,82 +113,55 @@ pub(crate) fn unnecessary_type_union<'a>(checker: &mut Checker, union: &'a Expr)
if checker.semantic().is_builtin("type") {
let content = if let Some(subscript) = subscript {
let types = &Expr::Subscript(ast::ExprSubscript {
value: Box::new(Expr::Name(ast::ExprName {
id: "type".into(),
ctx: ExprContext::Load,
range: TextRange::default(),
})),
slice: Box::new(Expr::Subscript(ast::ExprSubscript {
value: subscript.value.clone(),
slice: Box::new(Expr::Tuple(ast::ExprTuple {
elts: type_members
.into_iter()
.map(|type_member| {
Expr::Name(ast::ExprName {
id: type_member,
ctx: ExprContext::Load,
range: TextRange::default(),
checker
.generator()
.expr(&Expr::Subscript(ast::ExprSubscript {
value: Box::new(Expr::Name(ast::ExprName {
id: "type".into(),
ctx: ExprContext::Load,
range: TextRange::default(),
})),
slice: Box::new(Expr::Subscript(ast::ExprSubscript {
value: subscript.value.clone(),
slice: Box::new(Expr::Tuple(ast::ExprTuple {
elts: type_members
.into_iter()
.map(|type_member| {
Expr::Name(ast::ExprName {
id: type_member,
ctx: ExprContext::Load,
range: TextRange::default(),
})
})
})
.collect(),
.collect(),
ctx: ExprContext::Load,
range: TextRange::default(),
})),
ctx: ExprContext::Load,
range: TextRange::default(),
})),
ctx: ExprContext::Load,
range: TextRange::default(),
})),
ctx: ExprContext::Load,
range: TextRange::default(),
});
if other_exprs.is_empty() {
checker.generator().expr(types)
} else {
let mut exprs = Vec::new();
exprs.push(types);
exprs.extend(other_exprs);
let union = Expr::Subscript(ast::ExprSubscript {
value: subscript.value.clone(),
slice: Box::new(Expr::Tuple(ast::ExprTuple {
elts: exprs.into_iter().cloned().collect(),
ctx: ExprContext::Load,
range: TextRange::default(),
})),
ctx: ExprContext::Load,
range: TextRange::default(),
});
checker.generator().expr(&union)
}
}))
} else {
let types = &Expr::Subscript(ast::ExprSubscript {
value: Box::new(Expr::Name(ast::ExprName {
id: "type".into(),
checker
.generator()
.expr(&Expr::Subscript(ast::ExprSubscript {
value: Box::new(Expr::Name(ast::ExprName {
id: "type".into(),
ctx: ExprContext::Load,
range: TextRange::default(),
})),
slice: Box::new(concatenate_bin_ors(
type_exprs
.clone()
.into_iter()
.map(std::convert::AsRef::as_ref)
.collect(),
)),
ctx: ExprContext::Load,
range: TextRange::default(),
})),
slice: Box::new(concatenate_bin_ors(
type_exprs
.clone()
.into_iter()
.map(std::convert::AsRef::as_ref)
.collect(),
)),
ctx: ExprContext::Load,
range: TextRange::default(),
});
if other_exprs.is_empty() {
checker.generator().expr(types)
} else {
let mut exprs = Vec::new();
exprs.push(types);
exprs.extend(other_exprs);
checker.generator().expr(&concatenate_bin_ors(exprs))
}
}))
};
diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement(

View File

@@ -7,35 +7,28 @@ use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for the presence of unused private `TypeVar`, `ParamSpec` or
/// `TypeVarTuple` declarations.
/// Checks for the presence of unused private `TypeVar` declarations.
///
/// ## Why is this bad?
/// A private `TypeVar` that is defined but not used is likely a mistake. It
/// A private `TypeVar` that is defined but not used is likely a mistake, and
/// should either be used, made public, or removed to avoid confusion.
///
/// ## Example
/// ```python
/// import typing
/// import typing_extensions
///
/// _T = typing.TypeVar("_T")
/// _Ts = typing_extensions.TypeVarTuple("_Ts")
/// ```
#[violation]
pub struct UnusedPrivateTypeVar {
type_var_like_name: String,
type_var_like_kind: String,
name: String,
}
impl Violation for UnusedPrivateTypeVar {
#[derive_message_formats]
fn message(&self) -> String {
let UnusedPrivateTypeVar {
type_var_like_name,
type_var_like_kind,
} = self;
format!("Private {type_var_like_kind} `{type_var_like_name}` is never used")
let UnusedPrivateTypeVar { name } = self;
format!("Private TypeVar `{name}` is never used")
}
}
@@ -192,26 +185,13 @@ pub(crate) fn unused_private_type_var(
let Expr::Call(ast::ExprCall { func, .. }) = value.as_ref() else {
continue;
};
let semantic = checker.semantic();
let Some(type_var_like_kind) = semantic.resolve_call_path(func).and_then(|call_path| {
if semantic.match_typing_call_path(&call_path, "TypeVar") {
Some("TypeVar")
} else if semantic.match_typing_call_path(&call_path, "ParamSpec") {
Some("ParamSpec")
} else if semantic.match_typing_call_path(&call_path, "TypeVarTuple") {
Some("TypeVarTuple")
} else {
None
}
}) else {
if !checker.semantic().match_typing_expr(func, "TypeVar") {
continue;
};
}
diagnostics.push(Diagnostic::new(
UnusedPrivateTypeVar {
type_var_like_name: id.to_string(),
type_var_like_kind: type_var_like_kind.to_string(),
name: id.to_string(),
},
binding.range(),
));

View File

@@ -1,52 +1,22 @@
---
source: crates/ruff_linter/src/rules/flake8_pyi/mod.rs
---
PYI018.py:6:1: PYI018 Private TypeVar `_T` is never used
PYI018.py:4:1: PYI018 Private TypeVar `_T` is never used
|
4 | from typing_extensions import ParamSpec, TypeVarTuple
5 |
6 | _T = typing.TypeVar("_T")
2 | from typing import TypeVar
3 |
4 | _T = typing.TypeVar("_T")
| ^^ PYI018
7 | _Ts = typing_extensions.TypeVarTuple("_Ts")
8 | _P = ParamSpec("_P")
5 | _P = TypeVar("_P")
|
PYI018.py:7:1: PYI018 Private TypeVarTuple `_Ts` is never used
PYI018.py:5:1: PYI018 Private TypeVar `_P` is never used
|
6 | _T = typing.TypeVar("_T")
7 | _Ts = typing_extensions.TypeVarTuple("_Ts")
| ^^^ PYI018
8 | _P = ParamSpec("_P")
9 | _P2 = typing.ParamSpec("_P2")
4 | _T = typing.TypeVar("_T")
5 | _P = TypeVar("_P")
| ^^ PYI018
6 |
7 | # OK
|
PYI018.py:8:1: PYI018 Private ParamSpec `_P` is never used
|
6 | _T = typing.TypeVar("_T")
7 | _Ts = typing_extensions.TypeVarTuple("_Ts")
8 | _P = ParamSpec("_P")
| ^^ PYI018
9 | _P2 = typing.ParamSpec("_P2")
10 | _Ts2 = TypeVarTuple("_Ts2")
|
PYI018.py:9:1: PYI018 Private ParamSpec `_P2` is never used
|
7 | _Ts = typing_extensions.TypeVarTuple("_Ts")
8 | _P = ParamSpec("_P")
9 | _P2 = typing.ParamSpec("_P2")
| ^^^ PYI018
10 | _Ts2 = TypeVarTuple("_Ts2")
|
PYI018.py:10:1: PYI018 Private TypeVarTuple `_Ts2` is never used
|
8 | _P = ParamSpec("_P")
9 | _P2 = typing.ParamSpec("_P2")
10 | _Ts2 = TypeVarTuple("_Ts2")
| ^^^^ PYI018
11 |
12 | # OK
|

View File

@@ -1,52 +1,22 @@
---
source: crates/ruff_linter/src/rules/flake8_pyi/mod.rs
---
PYI018.pyi:6:1: PYI018 Private TypeVar `_T` is never used
PYI018.pyi:4:1: PYI018 Private TypeVar `_T` is never used
|
4 | from typing_extensions import ParamSpec, TypeVarTuple
5 |
6 | _T = typing.TypeVar("_T")
2 | from typing import TypeVar
3 |
4 | _T = typing.TypeVar("_T")
| ^^ PYI018
7 | _Ts = typing_extensions.TypeVarTuple("_Ts")
8 | _P = ParamSpec("_P")
5 | _P = TypeVar("_P")
|
PYI018.pyi:7:1: PYI018 Private TypeVarTuple `_Ts` is never used
PYI018.pyi:5:1: PYI018 Private TypeVar `_P` is never used
|
6 | _T = typing.TypeVar("_T")
7 | _Ts = typing_extensions.TypeVarTuple("_Ts")
| ^^^ PYI018
8 | _P = ParamSpec("_P")
9 | _P2 = typing.ParamSpec("_P2")
4 | _T = typing.TypeVar("_T")
5 | _P = TypeVar("_P")
| ^^ PYI018
6 |
7 | # OK
|
PYI018.pyi:8:1: PYI018 Private ParamSpec `_P` is never used
|
6 | _T = typing.TypeVar("_T")
7 | _Ts = typing_extensions.TypeVarTuple("_Ts")
8 | _P = ParamSpec("_P")
| ^^ PYI018
9 | _P2 = typing.ParamSpec("_P2")
10 | _Ts2 = TypeVarTuple("_Ts2")
|
PYI018.pyi:9:1: PYI018 Private ParamSpec `_P2` is never used
|
7 | _Ts = typing_extensions.TypeVarTuple("_Ts")
8 | _P = ParamSpec("_P")
9 | _P2 = typing.ParamSpec("_P2")
| ^^^ PYI018
10 | _Ts2 = TypeVarTuple("_Ts2")
|
PYI018.pyi:10:1: PYI018 Private TypeVarTuple `_Ts2` is never used
|
8 | _P = ParamSpec("_P")
9 | _P2 = typing.ParamSpec("_P2")
10 | _Ts2 = TypeVarTuple("_Ts2")
| ^^^^ PYI018
11 |
12 | # OK
|

View File

@@ -54,91 +54,5 @@ PYI055.py:39:8: PYI055 [*] Multiple `type` members in a union. Combine them into
38 38 | # PYI055
39 |- x: Union[type[requests_mock.Mocker], type[httpretty], type[str]] = requests_mock.Mocker
39 |+ x: type[Union[requests_mock.Mocker, httpretty, str]] = requests_mock.Mocker
40 40 |
41 41 |
42 42 | def convert_union(union: UnionType) -> _T | None:
PYI055.py:44:9: PYI055 [*] Multiple `type` members in a union. Combine them into one, e.g., `type[_T | Converter[_T]]`.
|
42 | def convert_union(union: UnionType) -> _T | None:
43 | converters: tuple[
44 | type[_T] | type[Converter[_T]] | Converter[_T] | Callable[[str], _T], ... # PYI055
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ PYI055
45 | ] = union.__args__
46 | ...
|
= help: Combine multiple `type` members
Safe fix
41 41 |
42 42 | def convert_union(union: UnionType) -> _T | None:
43 43 | converters: tuple[
44 |- type[_T] | type[Converter[_T]] | Converter[_T] | Callable[[str], _T], ... # PYI055
44 |+ type[_T | Converter[_T]] | Converter[_T] | Callable[[str], _T], ... # PYI055
45 45 | ] = union.__args__
46 46 | ...
47 47 |
PYI055.py:50:15: PYI055 [*] Multiple `type` members in a union. Combine them into one, e.g., `type[_T | Converter[_T]]`.
|
48 | def convert_union(union: UnionType) -> _T | None:
49 | converters: tuple[
50 | Union[type[_T] | type[Converter[_T]] | Converter[_T] | Callable[[str], _T]], ... # PYI055
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ PYI055
51 | ] = union.__args__
52 | ...
|
= help: Combine multiple `type` members
Safe fix
47 47 |
48 48 | def convert_union(union: UnionType) -> _T | None:
49 49 | converters: tuple[
50 |- Union[type[_T] | type[Converter[_T]] | Converter[_T] | Callable[[str], _T]], ... # PYI055
50 |+ Union[type[_T | Converter[_T]] | Converter[_T] | Callable[[str], _T]], ... # PYI055
51 51 | ] = union.__args__
52 52 | ...
53 53 |
PYI055.py:56:15: PYI055 [*] Multiple `type` members in a union. Combine them into one, e.g., `type[_T | Converter[_T]]`.
|
54 | def convert_union(union: UnionType) -> _T | None:
55 | converters: tuple[
56 | Union[type[_T] | type[Converter[_T]]] | Converter[_T] | Callable[[str], _T], ... # PYI055
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ PYI055
57 | ] = union.__args__
58 | ...
|
= help: Combine multiple `type` members
Safe fix
53 53 |
54 54 | def convert_union(union: UnionType) -> _T | None:
55 55 | converters: tuple[
56 |- Union[type[_T] | type[Converter[_T]]] | Converter[_T] | Callable[[str], _T], ... # PYI055
56 |+ Union[type[_T | Converter[_T]]] | Converter[_T] | Callable[[str], _T], ... # PYI055
57 57 | ] = union.__args__
58 58 | ...
59 59 |
PYI055.py:62:15: PYI055 [*] Multiple `type` members in a union. Combine them into one, e.g., `type[_T | Converter[_T]]`.
|
60 | def convert_union(union: UnionType) -> _T | None:
61 | converters: tuple[
62 | Union[type[_T] | type[Converter[_T]] | str] | Converter[_T] | Callable[[str], _T], ... # PYI055
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ PYI055
63 | ] = union.__args__
64 | ...
|
= help: Combine multiple `type` members
Safe fix
59 59 |
60 60 | def convert_union(union: UnionType) -> _T | None:
61 61 | converters: tuple[
62 |- Union[type[_T] | type[Converter[_T]] | str] | Converter[_T] | Callable[[str], _T], ... # PYI055
62 |+ Union[type[_T | Converter[_T]] | str] | Converter[_T] | Callable[[str], _T], ... # PYI055
63 63 | ] = union.__args__
64 64 | ...

View File

@@ -120,7 +120,7 @@ PYI055.pyi:10:15: PYI055 [*] Multiple `type` members in a union. Combine them in
8 8 | z: Union[type[float, int], type[complex]]
9 9 |
10 |-def func(arg: type[int] | str | type[float]) -> None: ...
10 |+def func(arg: type[int | float] | str) -> None: ...
10 |+def func(arg: type[int | float]) -> None: ...
11 11 |
12 12 | # OK
13 13 | x: type[int, str, float]

View File

@@ -56,7 +56,6 @@ mod tests {
}
#[test_case(Rule::InDictKeys, Path::new("SIM118.py"))]
#[test_case(Rule::YodaConditions, Path::new("SIM300.py"))]
#[test_case(Rule::IfElseBlockInsteadOfDictGet, Path::new("SIM401.py"))]
#[test_case(Rule::DictGetWithNoneDefault, Path::new("SIM910.py"))]
fn preview_rules(rule_code: Rule, path: &Path) -> Result<()> {

View File

@@ -1,5 +1,3 @@
use std::cmp;
use anyhow::Result;
use libcst_native::CompOp;
@@ -16,7 +14,6 @@ use crate::cst::helpers::or_space;
use crate::cst::matchers::{match_comparison, transform_expression};
use crate::fix::edits::pad;
use crate::fix::snippet::SourceCodeSnippet;
use crate::settings::types::PreviewMode;
/// ## What it does
/// Checks for conditions that position a constant on the left-hand side of the
@@ -81,65 +78,18 @@ impl Violation for YodaConditions {
}
}
/// Comparisons left-hand side must not be more [`ConstantLikelihood`] than the right-hand side.
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug)]
enum ConstantLikelihood {
/// The expression is unlikely to be a constant (e.g., `foo` or `foo(bar)`).
Unlikely = 0,
/// The expression is likely to be a constant (e.g., `FOO`).
Probably = 1,
/// The expression is definitely a constant (e.g., `42` or `"foo"`).
Definitely = 2,
}
impl ConstantLikelihood {
/// Determine the [`ConstantLikelihood`] of an expression.
fn from_expression(expr: &Expr, preview: PreviewMode) -> Self {
match expr {
_ if expr.is_literal_expr() => ConstantLikelihood::Definitely,
Expr::Attribute(ast::ExprAttribute { attr, .. }) => {
ConstantLikelihood::from_identifier(attr)
}
Expr::Name(ast::ExprName { id, .. }) => ConstantLikelihood::from_identifier(id),
Expr::Tuple(ast::ExprTuple { elts, .. }) => elts
.iter()
.map(|expr| ConstantLikelihood::from_expression(expr, preview))
.min()
.unwrap_or(ConstantLikelihood::Definitely),
Expr::List(ast::ExprList { elts, .. }) if preview.is_enabled() => elts
.iter()
.map(|expr| ConstantLikelihood::from_expression(expr, preview))
.min()
.unwrap_or(ConstantLikelihood::Definitely),
Expr::Dict(ast::ExprDict { values: vs, .. }) if preview.is_enabled() => {
if vs.is_empty() {
ConstantLikelihood::Definitely
} else {
ConstantLikelihood::Probably
}
}
Expr::BinOp(ast::ExprBinOp { left, right, .. }) => cmp::min(
ConstantLikelihood::from_expression(left, preview),
ConstantLikelihood::from_expression(right, preview),
),
Expr::UnaryOp(ast::ExprUnaryOp {
op: UnaryOp::UAdd | UnaryOp::USub | UnaryOp::Invert,
operand,
range: _,
}) => ConstantLikelihood::from_expression(operand, preview),
_ => ConstantLikelihood::Unlikely,
}
}
/// Determine the [`ConstantLikelihood`] of an identifier.
fn from_identifier(identifier: &str) -> Self {
if str::is_cased_uppercase(identifier) {
ConstantLikelihood::Probably
} else {
ConstantLikelihood::Unlikely
}
/// Return `true` if an [`Expr`] is a constant or a constant-like name.
fn is_constant_like(expr: &Expr) -> bool {
match expr {
Expr::Attribute(ast::ExprAttribute { attr, .. }) => str::is_cased_uppercase(attr),
Expr::Tuple(ast::ExprTuple { elts, .. }) => elts.iter().all(is_constant_like),
Expr::Name(ast::ExprName { id, .. }) => str::is_cased_uppercase(id),
Expr::UnaryOp(ast::ExprUnaryOp {
op: UnaryOp::UAdd | UnaryOp::USub | UnaryOp::Invert,
operand,
range: _,
}) => operand.is_literal_expr(),
_ => expr.is_literal_expr(),
}
}
@@ -230,9 +180,7 @@ pub(crate) fn yoda_conditions(
return;
}
if ConstantLikelihood::from_expression(left, checker.settings.preview)
<= ConstantLikelihood::from_expression(right, checker.settings.preview)
{
if !is_constant_like(left) || is_constant_like(right) {
return;
}

View File

@@ -6,8 +6,8 @@ SIM300.py:2:1: SIM300 [*] Yoda conditions are discouraged, use `compare == "yoda
1 | # Errors
2 | "yoda" == compare # SIM300
| ^^^^^^^^^^^^^^^^^ SIM300
3 | 42 == age # SIM300
4 | ("a", "b") == compare # SIM300
3 | "yoda" == compare # SIM300
4 | 42 == age # SIM300
|
= help: Replace Yoda condition with `compare == "yoda"`
@@ -15,340 +15,342 @@ SIM300.py:2:1: SIM300 [*] Yoda conditions are discouraged, use `compare == "yoda
1 1 | # Errors
2 |-"yoda" == compare # SIM300
2 |+compare == "yoda" # SIM300
3 3 | 42 == age # SIM300
4 4 | ("a", "b") == compare # SIM300
5 5 | "yoda" <= compare # SIM300
3 3 | "yoda" == compare # SIM300
4 4 | 42 == age # SIM300
5 5 | ("a", "b") == compare # SIM300
SIM300.py:3:1: SIM300 [*] Yoda conditions are discouraged, use `age == 42` instead
SIM300.py:3:1: SIM300 [*] Yoda conditions are discouraged, use `compare == "yoda"` instead
|
1 | # Errors
2 | "yoda" == compare # SIM300
3 | 42 == age # SIM300
3 | "yoda" == compare # SIM300
| ^^^^^^^^^^^^^^^^^ SIM300
4 | 42 == age # SIM300
5 | ("a", "b") == compare # SIM300
|
= help: Replace Yoda condition with `compare == "yoda"`
Safe fix
1 1 | # Errors
2 2 | "yoda" == compare # SIM300
3 |-"yoda" == compare # SIM300
3 |+compare == "yoda" # SIM300
4 4 | 42 == age # SIM300
5 5 | ("a", "b") == compare # SIM300
6 6 | "yoda" <= compare # SIM300
SIM300.py:4:1: SIM300 [*] Yoda conditions are discouraged, use `age == 42` instead
|
2 | "yoda" == compare # SIM300
3 | "yoda" == compare # SIM300
4 | 42 == age # SIM300
| ^^^^^^^^^ SIM300
4 | ("a", "b") == compare # SIM300
5 | "yoda" <= compare # SIM300
5 | ("a", "b") == compare # SIM300
6 | "yoda" <= compare # SIM300
|
= help: Replace Yoda condition with `age == 42`
Safe fix
1 1 | # Errors
2 2 | "yoda" == compare # SIM300
3 |-42 == age # SIM300
3 |+age == 42 # SIM300
4 4 | ("a", "b") == compare # SIM300
5 5 | "yoda" <= compare # SIM300
6 6 | "yoda" < compare # SIM300
3 3 | "yoda" == compare # SIM300
4 |-42 == age # SIM300
4 |+age == 42 # SIM300
5 5 | ("a", "b") == compare # SIM300
6 6 | "yoda" <= compare # SIM300
7 7 | "yoda" < compare # SIM300
SIM300.py:4:1: SIM300 [*] Yoda conditions are discouraged, use `compare == ("a", "b")` instead
SIM300.py:5:1: SIM300 [*] Yoda conditions are discouraged, use `compare == ("a", "b")` instead
|
2 | "yoda" == compare # SIM300
3 | 42 == age # SIM300
4 | ("a", "b") == compare # SIM300
3 | "yoda" == compare # SIM300
4 | 42 == age # SIM300
5 | ("a", "b") == compare # SIM300
| ^^^^^^^^^^^^^^^^^^^^^ SIM300
5 | "yoda" <= compare # SIM300
6 | "yoda" < compare # SIM300
6 | "yoda" <= compare # SIM300
7 | "yoda" < compare # SIM300
|
= help: Replace Yoda condition with `compare == ("a", "b")`
Safe fix
1 1 | # Errors
2 2 | "yoda" == compare # SIM300
3 3 | 42 == age # SIM300
4 |-("a", "b") == compare # SIM300
4 |+compare == ("a", "b") # SIM300
5 5 | "yoda" <= compare # SIM300
6 6 | "yoda" < compare # SIM300
7 7 | 42 > age # SIM300
3 3 | "yoda" == compare # SIM300
4 4 | 42 == age # SIM300
5 |-("a", "b") == compare # SIM300
5 |+compare == ("a", "b") # SIM300
6 6 | "yoda" <= compare # SIM300
7 7 | "yoda" < compare # SIM300
8 8 | 42 > age # SIM300
SIM300.py:5:1: SIM300 [*] Yoda conditions are discouraged, use `compare >= "yoda"` instead
SIM300.py:6:1: SIM300 [*] Yoda conditions are discouraged, use `compare >= "yoda"` instead
|
3 | 42 == age # SIM300
4 | ("a", "b") == compare # SIM300
5 | "yoda" <= compare # SIM300
4 | 42 == age # SIM300
5 | ("a", "b") == compare # SIM300
6 | "yoda" <= compare # SIM300
| ^^^^^^^^^^^^^^^^^ SIM300
6 | "yoda" < compare # SIM300
7 | 42 > age # SIM300
7 | "yoda" < compare # SIM300
8 | 42 > age # SIM300
|
= help: Replace Yoda condition with `compare >= "yoda"`
Safe fix
2 2 | "yoda" == compare # SIM300
3 3 | 42 == age # SIM300
4 4 | ("a", "b") == compare # SIM300
5 |-"yoda" <= compare # SIM300
5 |+compare >= "yoda" # SIM300
6 6 | "yoda" < compare # SIM300
7 7 | 42 > age # SIM300
8 8 | -42 > age # SIM300
3 3 | "yoda" == compare # SIM300
4 4 | 42 == age # SIM300
5 5 | ("a", "b") == compare # SIM300
6 |-"yoda" <= compare # SIM300
6 |+compare >= "yoda" # SIM300
7 7 | "yoda" < compare # SIM300
8 8 | 42 > age # SIM300
9 9 | -42 > age # SIM300
SIM300.py:6:1: SIM300 [*] Yoda conditions are discouraged, use `compare > "yoda"` instead
SIM300.py:7:1: SIM300 [*] Yoda conditions are discouraged, use `compare > "yoda"` instead
|
4 | ("a", "b") == compare # SIM300
5 | "yoda" <= compare # SIM300
6 | "yoda" < compare # SIM300
5 | ("a", "b") == compare # SIM300
6 | "yoda" <= compare # SIM300
7 | "yoda" < compare # SIM300
| ^^^^^^^^^^^^^^^^ SIM300
7 | 42 > age # SIM300
8 | -42 > age # SIM300
8 | 42 > age # SIM300
9 | -42 > age # SIM300
|
= help: Replace Yoda condition with `compare > "yoda"`
Safe fix
3 3 | 42 == age # SIM300
4 4 | ("a", "b") == compare # SIM300
5 5 | "yoda" <= compare # SIM300
6 |-"yoda" < compare # SIM300
6 |+compare > "yoda" # SIM300
7 7 | 42 > age # SIM300
8 8 | -42 > age # SIM300
9 9 | +42 > age # SIM300
4 4 | 42 == age # SIM300
5 5 | ("a", "b") == compare # SIM300
6 6 | "yoda" <= compare # SIM300
7 |-"yoda" < compare # SIM300
7 |+compare > "yoda" # SIM300
8 8 | 42 > age # SIM300
9 9 | -42 > age # SIM300
10 10 | +42 > age # SIM300
SIM300.py:7:1: SIM300 [*] Yoda conditions are discouraged, use `age < 42` instead
|
5 | "yoda" <= compare # SIM300
6 | "yoda" < compare # SIM300
7 | 42 > age # SIM300
| ^^^^^^^^ SIM300
8 | -42 > age # SIM300
9 | +42 > age # SIM300
|
= help: Replace Yoda condition with `age < 42`
SIM300.py:8:1: SIM300 [*] Yoda conditions are discouraged, use `age < 42` instead
|
6 | "yoda" <= compare # SIM300
7 | "yoda" < compare # SIM300
8 | 42 > age # SIM300
| ^^^^^^^^ SIM300
9 | -42 > age # SIM300
10 | +42 > age # SIM300
|
= help: Replace Yoda condition with `age < 42`
Safe fix
4 4 | ("a", "b") == compare # SIM300
5 5 | "yoda" <= compare # SIM300
6 6 | "yoda" < compare # SIM300
7 |-42 > age # SIM300
7 |+age < 42 # SIM300
8 8 | -42 > age # SIM300
9 9 | +42 > age # SIM300
10 10 | YODA == age # SIM300
5 5 | ("a", "b") == compare # SIM300
6 6 | "yoda" <= compare # SIM300
7 7 | "yoda" < compare # SIM300
8 |-42 > age # SIM300
8 |+age < 42 # SIM300
9 9 | -42 > age # SIM300
10 10 | +42 > age # SIM300
11 11 | YODA == age # SIM300
SIM300.py:8:1: SIM300 [*] Yoda conditions are discouraged, use `age < -42` instead
SIM300.py:9:1: SIM300 [*] Yoda conditions are discouraged, use `age < -42` instead
|
6 | "yoda" < compare # SIM300
7 | 42 > age # SIM300
8 | -42 > age # SIM300
7 | "yoda" < compare # SIM300
8 | 42 > age # SIM300
9 | -42 > age # SIM300
| ^^^^^^^^^ SIM300
9 | +42 > age # SIM300
10 | YODA == age # SIM300
10 | +42 > age # SIM300
11 | YODA == age # SIM300
|
= help: Replace Yoda condition with `age < -42`
Safe fix
5 5 | "yoda" <= compare # SIM300
6 6 | "yoda" < compare # SIM300
7 7 | 42 > age # SIM300
8 |--42 > age # SIM300
8 |+age < -42 # SIM300
9 9 | +42 > age # SIM300
10 10 | YODA == age # SIM300
11 11 | YODA > age # SIM300
6 6 | "yoda" <= compare # SIM300
7 7 | "yoda" < compare # SIM300
8 8 | 42 > age # SIM300
9 |--42 > age # SIM300
9 |+age < -42 # SIM300
10 10 | +42 > age # SIM300
11 11 | YODA == age # SIM300
12 12 | YODA > age # SIM300
SIM300.py:9:1: SIM300 [*] Yoda conditions are discouraged, use `age < +42` instead
SIM300.py:10:1: SIM300 [*] Yoda conditions are discouraged, use `age < +42` instead
|
7 | 42 > age # SIM300
8 | -42 > age # SIM300
9 | +42 > age # SIM300
8 | 42 > age # SIM300
9 | -42 > age # SIM300
10 | +42 > age # SIM300
| ^^^^^^^^^ SIM300
10 | YODA == age # SIM300
11 | YODA > age # SIM300
11 | YODA == age # SIM300
12 | YODA > age # SIM300
|
= help: Replace Yoda condition with `age < +42`
Safe fix
6 6 | "yoda" < compare # SIM300
7 7 | 42 > age # SIM300
8 8 | -42 > age # SIM300
9 |-+42 > age # SIM300
9 |+age < +42 # SIM300
10 10 | YODA == age # SIM300
11 11 | YODA > age # SIM300
12 12 | YODA >= age # SIM300
7 7 | "yoda" < compare # SIM300
8 8 | 42 > age # SIM300
9 9 | -42 > age # SIM300
10 |-+42 > age # SIM300
10 |+age < +42 # SIM300
11 11 | YODA == age # SIM300
12 12 | YODA > age # SIM300
13 13 | YODA >= age # SIM300
SIM300.py:10:1: SIM300 [*] Yoda conditions are discouraged, use `age == YODA` instead
SIM300.py:11:1: SIM300 [*] Yoda conditions are discouraged, use `age == YODA` instead
|
8 | -42 > age # SIM300
9 | +42 > age # SIM300
10 | YODA == age # SIM300
9 | -42 > age # SIM300
10 | +42 > age # SIM300
11 | YODA == age # SIM300
| ^^^^^^^^^^^ SIM300
11 | YODA > age # SIM300
12 | YODA >= age # SIM300
12 | YODA > age # SIM300
13 | YODA >= age # SIM300
|
= help: Replace Yoda condition with `age == YODA`
Safe fix
7 7 | 42 > age # SIM300
8 8 | -42 > age # SIM300
9 9 | +42 > age # SIM300
10 |-YODA == age # SIM300
10 |+age == YODA # SIM300
11 11 | YODA > age # SIM300
12 12 | YODA >= age # SIM300
13 13 | JediOrder.YODA == age # SIM300
8 8 | 42 > age # SIM300
9 9 | -42 > age # SIM300
10 10 | +42 > age # SIM300
11 |-YODA == age # SIM300
11 |+age == YODA # SIM300
12 12 | YODA > age # SIM300
13 13 | YODA >= age # SIM300
14 14 | JediOrder.YODA == age # SIM300
SIM300.py:11:1: SIM300 [*] Yoda conditions are discouraged, use `age < YODA` instead
SIM300.py:12:1: SIM300 [*] Yoda conditions are discouraged, use `age < YODA` instead
|
9 | +42 > age # SIM300
10 | YODA == age # SIM300
11 | YODA > age # SIM300
10 | +42 > age # SIM300
11 | YODA == age # SIM300
12 | YODA > age # SIM300
| ^^^^^^^^^^ SIM300
12 | YODA >= age # SIM300
13 | JediOrder.YODA == age # SIM300
13 | YODA >= age # SIM300
14 | JediOrder.YODA == age # SIM300
|
= help: Replace Yoda condition with `age < YODA`
Safe fix
8 8 | -42 > age # SIM300
9 9 | +42 > age # SIM300
10 10 | YODA == age # SIM300
11 |-YODA > age # SIM300
11 |+age < YODA # SIM300
12 12 | YODA >= age # SIM300
13 13 | JediOrder.YODA == age # SIM300
14 14 | 0 < (number - 100) # SIM300
9 9 | -42 > age # SIM300
10 10 | +42 > age # SIM300
11 11 | YODA == age # SIM300
12 |-YODA > age # SIM300
12 |+age < YODA # SIM300
13 13 | YODA >= age # SIM300
14 14 | JediOrder.YODA == age # SIM300
15 15 | 0 < (number - 100) # SIM300
SIM300.py:12:1: SIM300 [*] Yoda conditions are discouraged, use `age <= YODA` instead
SIM300.py:13:1: SIM300 [*] Yoda conditions are discouraged, use `age <= YODA` instead
|
10 | YODA == age # SIM300
11 | YODA > age # SIM300
12 | YODA >= age # SIM300
11 | YODA == age # SIM300
12 | YODA > age # SIM300
13 | YODA >= age # SIM300
| ^^^^^^^^^^^ SIM300
13 | JediOrder.YODA == age # SIM300
14 | 0 < (number - 100) # SIM300
14 | JediOrder.YODA == age # SIM300
15 | 0 < (number - 100) # SIM300
|
= help: Replace Yoda condition with `age <= YODA`
Safe fix
9 9 | +42 > age # SIM300
10 10 | YODA == age # SIM300
11 11 | YODA > age # SIM300
12 |-YODA >= age # SIM300
12 |+age <= YODA # SIM300
13 13 | JediOrder.YODA == age # SIM300
14 14 | 0 < (number - 100) # SIM300
15 15 | B<A[0][0]or B
10 10 | +42 > age # SIM300
11 11 | YODA == age # SIM300
12 12 | YODA > age # SIM300
13 |-YODA >= age # SIM300
13 |+age <= YODA # SIM300
14 14 | JediOrder.YODA == age # SIM300
15 15 | 0 < (number - 100) # SIM300
16 16 | SomeClass().settings.SOME_CONSTANT_VALUE > (60 * 60) # SIM300
SIM300.py:13:1: SIM300 [*] Yoda conditions are discouraged, use `age == JediOrder.YODA` instead
SIM300.py:14:1: SIM300 [*] Yoda conditions are discouraged, use `age == JediOrder.YODA` instead
|
11 | YODA > age # SIM300
12 | YODA >= age # SIM300
13 | JediOrder.YODA == age # SIM300
12 | YODA > age # SIM300
13 | YODA >= age # SIM300
14 | JediOrder.YODA == age # SIM300
| ^^^^^^^^^^^^^^^^^^^^^ SIM300
14 | 0 < (number - 100) # SIM300
15 | B<A[0][0]or B
15 | 0 < (number - 100) # SIM300
16 | SomeClass().settings.SOME_CONSTANT_VALUE > (60 * 60) # SIM300
|
= help: Replace Yoda condition with `age == JediOrder.YODA`
Safe fix
10 10 | YODA == age # SIM300
11 11 | YODA > age # SIM300
12 12 | YODA >= age # SIM300
13 |-JediOrder.YODA == age # SIM300
13 |+age == JediOrder.YODA # SIM300
14 14 | 0 < (number - 100) # SIM300
15 15 | B<A[0][0]or B
16 16 | B or(B)<A[0][0]
11 11 | YODA == age # SIM300
12 12 | YODA > age # SIM300
13 13 | YODA >= age # SIM300
14 |-JediOrder.YODA == age # SIM300
14 |+age == JediOrder.YODA # SIM300
15 15 | 0 < (number - 100) # SIM300
16 16 | SomeClass().settings.SOME_CONSTANT_VALUE > (60 * 60) # SIM300
17 17 | B<A[0][0]or B
SIM300.py:14:1: SIM300 [*] Yoda conditions are discouraged, use `(number - 100) > 0` instead
SIM300.py:15:1: SIM300 [*] Yoda conditions are discouraged, use `(number - 100) > 0` instead
|
12 | YODA >= age # SIM300
13 | JediOrder.YODA == age # SIM300
14 | 0 < (number - 100) # SIM300
13 | YODA >= age # SIM300
14 | JediOrder.YODA == age # SIM300
15 | 0 < (number - 100) # SIM300
| ^^^^^^^^^^^^^^^^^^ SIM300
15 | B<A[0][0]or B
16 | B or(B)<A[0][0]
16 | SomeClass().settings.SOME_CONSTANT_VALUE > (60 * 60) # SIM300
17 | B<A[0][0]or B
|
= help: Replace Yoda condition with `(number - 100) > 0`
Safe fix
11 11 | YODA > age # SIM300
12 12 | YODA >= age # SIM300
13 13 | JediOrder.YODA == age # SIM300
14 |-0 < (number - 100) # SIM300
14 |+(number - 100) > 0 # SIM300
15 15 | B<A[0][0]or B
16 16 | B or(B)<A[0][0]
17 17 |
12 12 | YODA > age # SIM300
13 13 | YODA >= age # SIM300
14 14 | JediOrder.YODA == age # SIM300
15 |-0 < (number - 100) # SIM300
15 |+(number - 100) > 0 # SIM300
16 16 | SomeClass().settings.SOME_CONSTANT_VALUE > (60 * 60) # SIM300
17 17 | B<A[0][0]or B
18 18 | B or(B)<A[0][0]
SIM300.py:15:1: SIM300 [*] Yoda conditions are discouraged, use `A[0][0] > B` instead
SIM300.py:16:1: SIM300 [*] Yoda conditions are discouraged
|
13 | JediOrder.YODA == age # SIM300
14 | 0 < (number - 100) # SIM300
15 | B<A[0][0]or B
14 | JediOrder.YODA == age # SIM300
15 | 0 < (number - 100) # SIM300
16 | SomeClass().settings.SOME_CONSTANT_VALUE > (60 * 60) # SIM300
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ SIM300
17 | B<A[0][0]or B
18 | B or(B)<A[0][0]
|
= help: Replace Yoda condition
Safe fix
13 13 | YODA >= age # SIM300
14 14 | JediOrder.YODA == age # SIM300
15 15 | 0 < (number - 100) # SIM300
16 |-SomeClass().settings.SOME_CONSTANT_VALUE > (60 * 60) # SIM300
16 |+(60 * 60) < SomeClass().settings.SOME_CONSTANT_VALUE # SIM300
17 17 | B<A[0][0]or B
18 18 | B or(B)<A[0][0]
19 19 |
SIM300.py:17:1: SIM300 [*] Yoda conditions are discouraged, use `A[0][0] > B` instead
|
15 | 0 < (number - 100) # SIM300
16 | SomeClass().settings.SOME_CONSTANT_VALUE > (60 * 60) # SIM300
17 | B<A[0][0]or B
| ^^^^^^^^^ SIM300
16 | B or(B)<A[0][0]
18 | B or(B)<A[0][0]
|
= help: Replace Yoda condition with `A[0][0] > B`
Safe fix
12 12 | YODA >= age # SIM300
13 13 | JediOrder.YODA == age # SIM300
14 14 | 0 < (number - 100) # SIM300
15 |-B<A[0][0]or B
15 |+A[0][0] > B or B
16 16 | B or(B)<A[0][0]
17 17 |
18 18 | # Errors in preview
14 14 | JediOrder.YODA == age # SIM300
15 15 | 0 < (number - 100) # SIM300
16 16 | SomeClass().settings.SOME_CONSTANT_VALUE > (60 * 60) # SIM300
17 |-B<A[0][0]or B
17 |+A[0][0] > B or B
18 18 | B or(B)<A[0][0]
19 19 |
20 20 | # OK
SIM300.py:16:5: SIM300 [*] Yoda conditions are discouraged, use `A[0][0] > (B)` instead
SIM300.py:18:5: SIM300 [*] Yoda conditions are discouraged, use `A[0][0] > (B)` instead
|
14 | 0 < (number - 100) # SIM300
15 | B<A[0][0]or B
16 | B or(B)<A[0][0]
16 | SomeClass().settings.SOME_CONSTANT_VALUE > (60 * 60) # SIM300
17 | B<A[0][0]or B
18 | B or(B)<A[0][0]
| ^^^^^^^^^^^ SIM300
17 |
18 | # Errors in preview
19 |
20 | # OK
|
= help: Replace Yoda condition with `A[0][0] > (B)`
Safe fix
13 13 | JediOrder.YODA == age # SIM300
14 14 | 0 < (number - 100) # SIM300
15 15 | B<A[0][0]or B
16 |-B or(B)<A[0][0]
16 |+B or A[0][0] > (B)
17 17 |
18 18 | # Errors in preview
19 19 | ['upper'] == UPPER_LIST
SIM300.py:23:1: SIM300 [*] Yoda conditions are discouraged, use `['upper'] == UPPER_LIST` instead
|
22 | # Errors in stable
23 | UPPER_LIST == ['upper']
| ^^^^^^^^^^^^^^^^^^^^^^^ SIM300
24 | DummyHandler.CONFIG == {}
|
= help: Replace Yoda condition with `['upper'] == UPPER_LIST`
Safe fix
20 20 | {} == DummyHandler.CONFIG
21 21 |
22 22 | # Errors in stable
23 |-UPPER_LIST == ['upper']
23 |+['upper'] == UPPER_LIST
24 24 | DummyHandler.CONFIG == {}
25 25 |
26 26 | # OK
SIM300.py:24:1: SIM300 [*] Yoda conditions are discouraged, use `{} == DummyHandler.CONFIG` instead
|
22 | # Errors in stable
23 | UPPER_LIST == ['upper']
24 | DummyHandler.CONFIG == {}
| ^^^^^^^^^^^^^^^^^^^^^^^^^ SIM300
25 |
26 | # OK
|
= help: Replace Yoda condition with `{} == DummyHandler.CONFIG`
Safe fix
21 21 |
22 22 | # Errors in stable
23 23 | UPPER_LIST == ['upper']
24 |-DummyHandler.CONFIG == {}
24 |+{} == DummyHandler.CONFIG
25 25 |
26 26 | # OK
27 27 | compare == "yoda"
15 15 | 0 < (number - 100) # SIM300
16 16 | SomeClass().settings.SOME_CONSTANT_VALUE > (60 * 60) # SIM300
17 17 | B<A[0][0]or B
18 |-B or(B)<A[0][0]
18 |+B or A[0][0] > (B)
19 19 |
20 20 | # OK
21 21 | compare == "yoda"

View File

@@ -1,354 +0,0 @@
---
source: crates/ruff_linter/src/rules/flake8_simplify/mod.rs
---
SIM300.py:2:1: SIM300 [*] Yoda conditions are discouraged, use `compare == "yoda"` instead
|
1 | # Errors
2 | "yoda" == compare # SIM300
| ^^^^^^^^^^^^^^^^^ SIM300
3 | 42 == age # SIM300
4 | ("a", "b") == compare # SIM300
|
= help: Replace Yoda condition with `compare == "yoda"`
Safe fix
1 1 | # Errors
2 |-"yoda" == compare # SIM300
2 |+compare == "yoda" # SIM300
3 3 | 42 == age # SIM300
4 4 | ("a", "b") == compare # SIM300
5 5 | "yoda" <= compare # SIM300
SIM300.py:3:1: SIM300 [*] Yoda conditions are discouraged, use `age == 42` instead
|
1 | # Errors
2 | "yoda" == compare # SIM300
3 | 42 == age # SIM300
| ^^^^^^^^^ SIM300
4 | ("a", "b") == compare # SIM300
5 | "yoda" <= compare # SIM300
|
= help: Replace Yoda condition with `age == 42`
Safe fix
1 1 | # Errors
2 2 | "yoda" == compare # SIM300
3 |-42 == age # SIM300
3 |+age == 42 # SIM300
4 4 | ("a", "b") == compare # SIM300
5 5 | "yoda" <= compare # SIM300
6 6 | "yoda" < compare # SIM300
SIM300.py:4:1: SIM300 [*] Yoda conditions are discouraged, use `compare == ("a", "b")` instead
|
2 | "yoda" == compare # SIM300
3 | 42 == age # SIM300
4 | ("a", "b") == compare # SIM300
| ^^^^^^^^^^^^^^^^^^^^^ SIM300
5 | "yoda" <= compare # SIM300
6 | "yoda" < compare # SIM300
|
= help: Replace Yoda condition with `compare == ("a", "b")`
Safe fix
1 1 | # Errors
2 2 | "yoda" == compare # SIM300
3 3 | 42 == age # SIM300
4 |-("a", "b") == compare # SIM300
4 |+compare == ("a", "b") # SIM300
5 5 | "yoda" <= compare # SIM300
6 6 | "yoda" < compare # SIM300
7 7 | 42 > age # SIM300
SIM300.py:5:1: SIM300 [*] Yoda conditions are discouraged, use `compare >= "yoda"` instead
|
3 | 42 == age # SIM300
4 | ("a", "b") == compare # SIM300
5 | "yoda" <= compare # SIM300
| ^^^^^^^^^^^^^^^^^ SIM300
6 | "yoda" < compare # SIM300
7 | 42 > age # SIM300
|
= help: Replace Yoda condition with `compare >= "yoda"`
Safe fix
2 2 | "yoda" == compare # SIM300
3 3 | 42 == age # SIM300
4 4 | ("a", "b") == compare # SIM300
5 |-"yoda" <= compare # SIM300
5 |+compare >= "yoda" # SIM300
6 6 | "yoda" < compare # SIM300
7 7 | 42 > age # SIM300
8 8 | -42 > age # SIM300
SIM300.py:6:1: SIM300 [*] Yoda conditions are discouraged, use `compare > "yoda"` instead
|
4 | ("a", "b") == compare # SIM300
5 | "yoda" <= compare # SIM300
6 | "yoda" < compare # SIM300
| ^^^^^^^^^^^^^^^^ SIM300
7 | 42 > age # SIM300
8 | -42 > age # SIM300
|
= help: Replace Yoda condition with `compare > "yoda"`
Safe fix
3 3 | 42 == age # SIM300
4 4 | ("a", "b") == compare # SIM300
5 5 | "yoda" <= compare # SIM300
6 |-"yoda" < compare # SIM300
6 |+compare > "yoda" # SIM300
7 7 | 42 > age # SIM300
8 8 | -42 > age # SIM300
9 9 | +42 > age # SIM300
SIM300.py:7:1: SIM300 [*] Yoda conditions are discouraged, use `age < 42` instead
|
5 | "yoda" <= compare # SIM300
6 | "yoda" < compare # SIM300
7 | 42 > age # SIM300
| ^^^^^^^^ SIM300
8 | -42 > age # SIM300
9 | +42 > age # SIM300
|
= help: Replace Yoda condition with `age < 42`
Safe fix
4 4 | ("a", "b") == compare # SIM300
5 5 | "yoda" <= compare # SIM300
6 6 | "yoda" < compare # SIM300
7 |-42 > age # SIM300
7 |+age < 42 # SIM300
8 8 | -42 > age # SIM300
9 9 | +42 > age # SIM300
10 10 | YODA == age # SIM300
SIM300.py:8:1: SIM300 [*] Yoda conditions are discouraged, use `age < -42` instead
|
6 | "yoda" < compare # SIM300
7 | 42 > age # SIM300
8 | -42 > age # SIM300
| ^^^^^^^^^ SIM300
9 | +42 > age # SIM300
10 | YODA == age # SIM300
|
= help: Replace Yoda condition with `age < -42`
Safe fix
5 5 | "yoda" <= compare # SIM300
6 6 | "yoda" < compare # SIM300
7 7 | 42 > age # SIM300
8 |--42 > age # SIM300
8 |+age < -42 # SIM300
9 9 | +42 > age # SIM300
10 10 | YODA == age # SIM300
11 11 | YODA > age # SIM300
SIM300.py:9:1: SIM300 [*] Yoda conditions are discouraged, use `age < +42` instead
|
7 | 42 > age # SIM300
8 | -42 > age # SIM300
9 | +42 > age # SIM300
| ^^^^^^^^^ SIM300
10 | YODA == age # SIM300
11 | YODA > age # SIM300
|
= help: Replace Yoda condition with `age < +42`
Safe fix
6 6 | "yoda" < compare # SIM300
7 7 | 42 > age # SIM300
8 8 | -42 > age # SIM300
9 |-+42 > age # SIM300
9 |+age < +42 # SIM300
10 10 | YODA == age # SIM300
11 11 | YODA > age # SIM300
12 12 | YODA >= age # SIM300
SIM300.py:10:1: SIM300 [*] Yoda conditions are discouraged, use `age == YODA` instead
|
8 | -42 > age # SIM300
9 | +42 > age # SIM300
10 | YODA == age # SIM300
| ^^^^^^^^^^^ SIM300
11 | YODA > age # SIM300
12 | YODA >= age # SIM300
|
= help: Replace Yoda condition with `age == YODA`
Safe fix
7 7 | 42 > age # SIM300
8 8 | -42 > age # SIM300
9 9 | +42 > age # SIM300
10 |-YODA == age # SIM300
10 |+age == YODA # SIM300
11 11 | YODA > age # SIM300
12 12 | YODA >= age # SIM300
13 13 | JediOrder.YODA == age # SIM300
SIM300.py:11:1: SIM300 [*] Yoda conditions are discouraged, use `age < YODA` instead
|
9 | +42 > age # SIM300
10 | YODA == age # SIM300
11 | YODA > age # SIM300
| ^^^^^^^^^^ SIM300
12 | YODA >= age # SIM300
13 | JediOrder.YODA == age # SIM300
|
= help: Replace Yoda condition with `age < YODA`
Safe fix
8 8 | -42 > age # SIM300
9 9 | +42 > age # SIM300
10 10 | YODA == age # SIM300
11 |-YODA > age # SIM300
11 |+age < YODA # SIM300
12 12 | YODA >= age # SIM300
13 13 | JediOrder.YODA == age # SIM300
14 14 | 0 < (number - 100) # SIM300
SIM300.py:12:1: SIM300 [*] Yoda conditions are discouraged, use `age <= YODA` instead
|
10 | YODA == age # SIM300
11 | YODA > age # SIM300
12 | YODA >= age # SIM300
| ^^^^^^^^^^^ SIM300
13 | JediOrder.YODA == age # SIM300
14 | 0 < (number - 100) # SIM300
|
= help: Replace Yoda condition with `age <= YODA`
Safe fix
9 9 | +42 > age # SIM300
10 10 | YODA == age # SIM300
11 11 | YODA > age # SIM300
12 |-YODA >= age # SIM300
12 |+age <= YODA # SIM300
13 13 | JediOrder.YODA == age # SIM300
14 14 | 0 < (number - 100) # SIM300
15 15 | B<A[0][0]or B
SIM300.py:13:1: SIM300 [*] Yoda conditions are discouraged, use `age == JediOrder.YODA` instead
|
11 | YODA > age # SIM300
12 | YODA >= age # SIM300
13 | JediOrder.YODA == age # SIM300
| ^^^^^^^^^^^^^^^^^^^^^ SIM300
14 | 0 < (number - 100) # SIM300
15 | B<A[0][0]or B
|
= help: Replace Yoda condition with `age == JediOrder.YODA`
Safe fix
10 10 | YODA == age # SIM300
11 11 | YODA > age # SIM300
12 12 | YODA >= age # SIM300
13 |-JediOrder.YODA == age # SIM300
13 |+age == JediOrder.YODA # SIM300
14 14 | 0 < (number - 100) # SIM300
15 15 | B<A[0][0]or B
16 16 | B or(B)<A[0][0]
SIM300.py:14:1: SIM300 [*] Yoda conditions are discouraged, use `(number - 100) > 0` instead
|
12 | YODA >= age # SIM300
13 | JediOrder.YODA == age # SIM300
14 | 0 < (number - 100) # SIM300
| ^^^^^^^^^^^^^^^^^^ SIM300
15 | B<A[0][0]or B
16 | B or(B)<A[0][0]
|
= help: Replace Yoda condition with `(number - 100) > 0`
Safe fix
11 11 | YODA > age # SIM300
12 12 | YODA >= age # SIM300
13 13 | JediOrder.YODA == age # SIM300
14 |-0 < (number - 100) # SIM300
14 |+(number - 100) > 0 # SIM300
15 15 | B<A[0][0]or B
16 16 | B or(B)<A[0][0]
17 17 |
SIM300.py:15:1: SIM300 [*] Yoda conditions are discouraged, use `A[0][0] > B` instead
|
13 | JediOrder.YODA == age # SIM300
14 | 0 < (number - 100) # SIM300
15 | B<A[0][0]or B
| ^^^^^^^^^ SIM300
16 | B or(B)<A[0][0]
|
= help: Replace Yoda condition with `A[0][0] > B`
Safe fix
12 12 | YODA >= age # SIM300
13 13 | JediOrder.YODA == age # SIM300
14 14 | 0 < (number - 100) # SIM300
15 |-B<A[0][0]or B
15 |+A[0][0] > B or B
16 16 | B or(B)<A[0][0]
17 17 |
18 18 | # Errors in preview
SIM300.py:16:5: SIM300 [*] Yoda conditions are discouraged, use `A[0][0] > (B)` instead
|
14 | 0 < (number - 100) # SIM300
15 | B<A[0][0]or B
16 | B or(B)<A[0][0]
| ^^^^^^^^^^^ SIM300
17 |
18 | # Errors in preview
|
= help: Replace Yoda condition with `A[0][0] > (B)`
Safe fix
13 13 | JediOrder.YODA == age # SIM300
14 14 | 0 < (number - 100) # SIM300
15 15 | B<A[0][0]or B
16 |-B or(B)<A[0][0]
16 |+B or A[0][0] > (B)
17 17 |
18 18 | # Errors in preview
19 19 | ['upper'] == UPPER_LIST
SIM300.py:19:1: SIM300 [*] Yoda conditions are discouraged, use `UPPER_LIST == ['upper']` instead
|
18 | # Errors in preview
19 | ['upper'] == UPPER_LIST
| ^^^^^^^^^^^^^^^^^^^^^^^ SIM300
20 | {} == DummyHandler.CONFIG
|
= help: Replace Yoda condition with `UPPER_LIST == ['upper']`
Safe fix
16 16 | B or(B)<A[0][0]
17 17 |
18 18 | # Errors in preview
19 |-['upper'] == UPPER_LIST
19 |+UPPER_LIST == ['upper']
20 20 | {} == DummyHandler.CONFIG
21 21 |
22 22 | # Errors in stable
SIM300.py:20:1: SIM300 [*] Yoda conditions are discouraged, use `DummyHandler.CONFIG == {}` instead
|
18 | # Errors in preview
19 | ['upper'] == UPPER_LIST
20 | {} == DummyHandler.CONFIG
| ^^^^^^^^^^^^^^^^^^^^^^^^^ SIM300
21 |
22 | # Errors in stable
|
= help: Replace Yoda condition with `DummyHandler.CONFIG == {}`
Safe fix
17 17 |
18 18 | # Errors in preview
19 19 | ['upper'] == UPPER_LIST
20 |-{} == DummyHandler.CONFIG
20 |+DummyHandler.CONFIG == {}
21 21 |
22 22 | # Errors in stable
23 23 | UPPER_LIST == ['upper']

View File

@@ -1,12 +1,13 @@
use anyhow::Result;
use rustc_hash::FxHashSet;
use ruff_diagnostics::Edit;
use ruff_python_ast::call_path::from_qualified_name;
use ruff_python_ast::helpers::map_callable;
use ruff_python_ast::helpers::{map_callable, map_subscript};
use ruff_python_ast::{self as ast, Expr};
use ruff_python_codegen::{Generator, Stylist};
use ruff_python_codegen::Stylist;
use ruff_python_semantic::{
analyze, Binding, BindingKind, NodeId, ResolvedReference, SemanticModel,
Binding, BindingId, BindingKind, NodeId, ResolvedReference, SemanticModel,
};
use ruff_source_file::Locator;
use ruff_text_size::Ranged;
@@ -58,17 +59,57 @@ pub(crate) fn runtime_required_class(
false
}
/// Return `true` if a class is a subclass of a runtime-required base class.
fn runtime_required_base_class(
class_def: &ast::StmtClassDef,
base_classes: &[String],
semantic: &SemanticModel,
) -> bool {
analyze::class::any_over_body(class_def, semantic, &|call_path| {
base_classes
.iter()
.any(|base_class| from_qualified_name(base_class) == call_path)
})
fn inner(
class_def: &ast::StmtClassDef,
base_classes: &[String],
semantic: &SemanticModel,
seen: &mut FxHashSet<BindingId>,
) -> bool {
class_def.bases().iter().any(|expr| {
// If the base class is itself runtime-required, then this is too.
// Ex) `class Foo(BaseModel): ...`
if semantic
.resolve_call_path(map_subscript(expr))
.is_some_and(|call_path| {
base_classes
.iter()
.any(|base_class| from_qualified_name(base_class) == call_path)
})
{
return true;
}
// If the base class extends a runtime-required class, then this does too.
// Ex) `class Bar(BaseModel): ...; class Foo(Bar): ...`
if let Some(id) = semantic.lookup_attribute(map_subscript(expr)) {
if seen.insert(id) {
let binding = semantic.binding(id);
if let Some(base_class) = binding
.kind
.as_class_definition()
.map(|id| &semantic.scopes[*id])
.and_then(|scope| scope.kind.as_class())
{
if inner(base_class, base_classes, semantic, seen) {
return true;
}
}
}
}
false
})
}
if base_classes.is_empty() {
return false;
}
inner(class_def, base_classes, semantic, &mut FxHashSet::default())
}
fn runtime_required_decorators(
@@ -174,7 +215,6 @@ pub(crate) fn quote_annotation(
semantic: &SemanticModel,
locator: &Locator,
stylist: &Stylist,
generator: Generator,
) -> Result<Edit> {
let expr = semantic.expression(node_id).expect("Expression not found");
if let Some(parent_id) = semantic.parent_expression_id(node_id) {
@@ -184,7 +224,7 @@ pub(crate) fn quote_annotation(
// If we're quoting the value of a subscript, we need to quote the entire
// expression. For example, when quoting `DataFrame` in `DataFrame[int]`, we
// should generate `"DataFrame[int]"`.
return quote_annotation(parent_id, semantic, locator, stylist, generator);
return quote_annotation(parent_id, semantic, locator, stylist);
}
}
Some(Expr::Attribute(parent)) => {
@@ -192,7 +232,7 @@ pub(crate) fn quote_annotation(
// If we're quoting the value of an attribute, we need to quote the entire
// expression. For example, when quoting `DataFrame` in `pd.DataFrame`, we
// should generate `"pd.DataFrame"`.
return quote_annotation(parent_id, semantic, locator, stylist, generator);
return quote_annotation(parent_id, semantic, locator, stylist);
}
}
Some(Expr::Call(parent)) => {
@@ -200,7 +240,7 @@ pub(crate) fn quote_annotation(
// If we're quoting the function of a call, we need to quote the entire
// expression. For example, when quoting `DataFrame` in `DataFrame()`, we
// should generate `"DataFrame()"`.
return quote_annotation(parent_id, semantic, locator, stylist, generator);
return quote_annotation(parent_id, semantic, locator, stylist);
}
}
Some(Expr::BinOp(parent)) => {
@@ -208,44 +248,27 @@ pub(crate) fn quote_annotation(
// If we're quoting the left or right side of a binary operation, we need to
// quote the entire expression. For example, when quoting `DataFrame` in
// `DataFrame | Series`, we should generate `"DataFrame | Series"`.
return quote_annotation(parent_id, semantic, locator, stylist, generator);
return quote_annotation(parent_id, semantic, locator, stylist);
}
}
_ => {}
}
}
let annotation = locator.slice(expr);
// If the annotation already contains a quote, avoid attempting to re-quote it. For example:
// ```python
// from typing import Literal
//
// Set[Literal["Foo"]]
// ```
let text = locator.slice(expr);
if text.contains('\'') || text.contains('"') {
if annotation.contains('\'') || annotation.contains('"') {
return Err(anyhow::anyhow!("Annotation already contains a quote"));
}
// Quote the entire expression.
// If we're quoting a name, we need to quote the entire expression.
let quote = stylist.quote();
let annotation = generator.expr(expr);
Ok(Edit::range_replacement(
format!("{quote}{annotation}{quote}"),
expr.range(),
))
}
/// Filter out any [`Edit`]s that are completely contained by any other [`Edit`].
pub(crate) fn filter_contained(edits: Vec<Edit>) -> Vec<Edit> {
let mut filtered: Vec<Edit> = Vec::with_capacity(edits.len());
for edit in edits {
if filtered
.iter()
.all(|filtered_edit| !filtered_edit.range().contains_range(edit.range()))
{
filtered.push(edit);
}
}
filtered
let annotation = format!("{quote}{annotation}{quote}");
Ok(Edit::range_replacement(annotation, expr.range()))
}

View File

@@ -35,8 +35,6 @@ mod tests {
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("TCH004_8.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("TCH004_9.py"))]
#[test_case(Rule::RuntimeImportInTypeCheckingBlock, Path::new("quote.py"))]
#[test_case(Rule::RuntimeStringUnion, Path::new("TCH006_1.py"))]
#[test_case(Rule::RuntimeStringUnion, Path::new("TCH006_2.py"))]
#[test_case(Rule::TypingOnlyFirstPartyImport, Path::new("TCH001.py"))]
#[test_case(Rule::TypingOnlyStandardLibraryImport, Path::new("TCH003.py"))]
#[test_case(Rule::TypingOnlyStandardLibraryImport, Path::new("snapshot.py"))]
@@ -106,35 +104,6 @@ mod tests {
Ok(())
}
#[test_case(
Rule::TypingOnlyStandardLibraryImport,
Path::new("exempt_type_checking_1.py")
)]
#[test_case(
Rule::TypingOnlyStandardLibraryImport,
Path::new("exempt_type_checking_2.py")
)]
#[test_case(
Rule::TypingOnlyStandardLibraryImport,
Path::new("exempt_type_checking_3.py")
)]
fn exempt_type_checking(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!("{}_{}", rule_code.as_ref(), path.to_string_lossy());
let diagnostics = test_path(
Path::new("flake8_type_checking").join(path).as_path(),
&settings::LinterSettings {
flake8_type_checking: super::settings::Settings {
exempt_modules: vec![],
strict: true,
..Default::default()
},
..settings::LinterSettings::for_rule(rule_code)
},
)?;
assert_messages!(snapshot, diagnostics);
Ok(())
}
#[test_case(
Rule::RuntimeImportInTypeCheckingBlock,
Path::new("runtime_evaluated_base_classes_1.py")

View File

@@ -1,9 +1,7 @@
pub(crate) use empty_type_checking_block::*;
pub(crate) use runtime_import_in_type_checking_block::*;
pub(crate) use runtime_string_union::*;
pub(crate) use typing_only_runtime_import::*;
mod empty_type_checking_block;
mod runtime_import_in_type_checking_block;
mod runtime_string_union;
mod typing_only_runtime_import;

View File

@@ -12,7 +12,7 @@ use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::fix;
use crate::importer::ImportedMembers;
use crate::rules::flake8_type_checking::helpers::{filter_contained, quote_annotation};
use crate::rules::flake8_type_checking::helpers::quote_annotation;
use crate::rules::flake8_type_checking::imports::ImportBinding;
/// ## What it does
@@ -262,33 +262,32 @@ pub(crate) fn runtime_import_in_type_checking_block(
/// Generate a [`Fix`] to quote runtime usages for imports in a type-checking block.
fn quote_imports(checker: &Checker, node_id: NodeId, imports: &[ImportBinding]) -> Result<Fix> {
let quote_reference_edits = filter_contained(
imports
.iter()
.flat_map(|ImportBinding { binding, .. }| {
binding.references.iter().filter_map(|reference_id| {
let reference = checker.semantic().reference(*reference_id);
if reference.context().is_runtime() {
Some(quote_annotation(
reference.expression_id()?,
checker.semantic(),
checker.locator(),
checker.stylist(),
checker.generator(),
))
} else {
None
}
})
let mut quote_reference_edits = imports
.iter()
.flat_map(|ImportBinding { binding, .. }| {
binding.references.iter().filter_map(|reference_id| {
let reference = checker.semantic().reference(*reference_id);
if reference.context().is_runtime() {
Some(quote_annotation(
reference.expression_id()?,
checker.semantic(),
checker.locator(),
checker.stylist(),
))
} else {
None
}
})
.collect::<Result<Vec<_>>>()?,
);
let mut rest = quote_reference_edits.into_iter();
let head = rest.next().expect("Expected at least one reference");
Ok(Fix::unsafe_edits(head, rest).isolate(Checker::isolation(
checker.semantic().parent_statement_id(node_id),
)))
})
.collect::<Result<Vec<_>>>()?;
let quote_reference_edit = quote_reference_edits
.pop()
.expect("Expected at least one reference");
Ok(
Fix::unsafe_edits(quote_reference_edit, quote_reference_edits).isolate(Checker::isolation(
checker.semantic().parent_statement_id(node_id),
)),
)
}
/// Generate a [`Fix`] to remove runtime imports from a type-checking block.

View File

@@ -1,95 +0,0 @@
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast as ast;
use ruff_python_ast::{Expr, Operator};
use ruff_text_size::Ranged;
use crate::checkers::ast::Checker;
/// ## What it does
/// Checks for the presence of string literals in `X | Y`-style union types.
///
/// ## Why is this bad?
/// [PEP 604] introduced a new syntax for union type annotations based on the
/// `|` operator.
///
/// While Python's type annotations can typically be wrapped in strings to
/// avoid runtime evaluation, the use of a string member within an `X | Y`-style
/// union type will cause a runtime error.
///
/// Instead, remove the quotes, wrap the _entire_ union in quotes, or use
/// `from __future__ import annotations` to disable runtime evaluation of
/// annotations entirely.
///
/// ## Example
/// ```python
/// var: str | "int"
/// ```
///
/// Use instead:
/// ```python
/// var: str | int
/// ```
///
/// Or, extend the quotes to include the entire union:
/// ```python
/// var: "str | int"
/// ```
///
/// ## References
/// - [PEP 563](https://peps.python.org/pep-0563/)
/// - [PEP 604](https://peps.python.org/pep-0604/)
///
/// [PEP 604]: https://peps.python.org/pep-0604/
#[violation]
pub struct RuntimeStringUnion;
// Violation has no autofix: the user must decide whether to unquote the member
// or quote the entire union, so only a message is provided.
impl Violation for RuntimeStringUnion {
    #[derive_message_formats]
    fn message(&self) -> String {
        format!("Invalid string member in `X | Y`-style union type")
    }
}
/// TCH006
pub(crate) fn runtime_string_union(checker: &mut Checker, expr: &Expr) {
    // Only annotations that are both type definitions and evaluated at
    // runtime can trip over a quoted member of an `X | Y` union; everything
    // else (deferred annotations, non-type contexts) is exempt.
    if !checker.semantic().in_type_definition()
        || !checker.semantic().execution_context().is_runtime()
    {
        return;
    }
    // Gather every string member nested anywhere inside the `|` chain, then
    // emit one diagnostic per offending member, in source order.
    let mut strings = Vec::new();
    traverse_op(expr, &mut strings);
    checker.diagnostics.extend(
        strings
            .into_iter()
            .map(|string| Diagnostic::new(RuntimeStringUnion, string.range())),
    );
}
/// Collect all string (and bytes) members in possibly-nested binary `|`
/// expressions, appending each offending sub-expression to `strings`.
fn traverse_op<'a>(expr: &'a Expr, strings: &mut Vec<&'a Expr>) {
    match expr {
        // Both string and bytes literals are invalid members at runtime.
        Expr::StringLiteral(_) | Expr::BytesLiteral(_) => strings.push(expr),
        // Recurse into `left | right`, left-first, to preserve source order.
        Expr::BinOp(ast::ExprBinOp {
            left,
            right,
            op: Operator::BitOr,
            ..
        }) => {
            traverse_op(left, strings);
            traverse_op(right, strings);
        }
        // Any other expression kind cannot contain a direct union member.
        _ => {}
    }
}

View File

@@ -12,9 +12,7 @@ use crate::checkers::ast::Checker;
use crate::codes::Rule;
use crate::fix;
use crate::importer::ImportedMembers;
use crate::rules::flake8_type_checking::helpers::{
filter_contained, is_typing_reference, quote_annotation,
};
use crate::rules::flake8_type_checking::helpers::{is_typing_reference, quote_annotation};
use crate::rules::flake8_type_checking::imports::ImportBinding;
use crate::rules::isort::{categorize, ImportSection, ImportType};
@@ -473,47 +471,41 @@ fn fix_imports(checker: &Checker, node_id: NodeId, imports: &[ImportBinding]) ->
)?;
// Step 2) Add the import to a `TYPE_CHECKING` block.
let (type_checking_edit, add_import_edit) = checker
.importer()
.typing_import_edit(
&ImportedMembers {
statement,
names: member_names.iter().map(AsRef::as_ref).collect(),
},
at,
checker.semantic(),
checker.source_type,
)?
.into_edits();
let add_import_edit = checker.importer().typing_import_edit(
&ImportedMembers {
statement,
names: member_names.iter().map(AsRef::as_ref).collect(),
},
at,
checker.semantic(),
checker.source_type,
)?;
// Step 3) Quote any runtime usages of the referenced symbol.
let quote_reference_edits = filter_contained(
imports
.iter()
.flat_map(|ImportBinding { binding, .. }| {
binding.references.iter().filter_map(|reference_id| {
let reference = checker.semantic().reference(*reference_id);
if reference.context().is_runtime() {
Some(quote_annotation(
reference.expression_id()?,
checker.semantic(),
checker.locator(),
checker.stylist(),
checker.generator(),
))
} else {
None
}
})
let quote_reference_edits = imports
.iter()
.flat_map(|ImportBinding { binding, .. }| {
binding.references.iter().filter_map(|reference_id| {
let reference = checker.semantic().reference(*reference_id);
if reference.context().is_runtime() {
Some(quote_annotation(
reference.expression_id()?,
checker.semantic(),
checker.locator(),
checker.stylist(),
))
} else {
None
}
})
.collect::<Result<Vec<_>>>()?,
);
})
.collect::<Result<Vec<_>>>()?;
Ok(Fix::unsafe_edits(
type_checking_edit,
remove_import_edit,
add_import_edit
.into_edits()
.into_iter()
.chain(std::iter::once(remove_import_edit))
.chain(quote_reference_edits),
)
.isolate(Checker::isolation(

View File

@@ -18,7 +18,5 @@ quote.py:64:28: TCH004 [*] Quote references to `pandas.DataFrame`. Import is in
66 |- def func(value: DataFrame):
66 |+ def func(value: "DataFrame"):
67 67 | ...
68 68 |
69 69 |

View File

@@ -196,146 +196,4 @@ quote.py:54:24: TCH002 Move third-party import `pandas.DataFrame` into a type-ch
|
= help: Move into type-checking block
quote.py:71:24: TCH002 [*] Move third-party import `pandas.DataFrame` into a type-checking block
|
70 | def f():
71 | from pandas import DataFrame, Series
| ^^^^^^^^^ TCH002
72 |
73 | def baz() -> DataFrame | Series:
|
= help: Move into type-checking block
Unsafe fix
1 |+from typing import TYPE_CHECKING
2 |+
3 |+if TYPE_CHECKING:
4 |+ from pandas import DataFrame, Series
1 5 | def f():
2 6 | from pandas import DataFrame
3 7 |
--------------------------------------------------------------------------------
68 72 |
69 73 |
70 74 | def f():
71 |- from pandas import DataFrame, Series
72 75 |
73 |- def baz() -> DataFrame | Series:
76 |+ def baz() -> "DataFrame | Series":
74 77 | ...
75 78 |
76 79 |
quote.py:71:35: TCH002 [*] Move third-party import `pandas.Series` into a type-checking block
|
70 | def f():
71 | from pandas import DataFrame, Series
| ^^^^^^ TCH002
72 |
73 | def baz() -> DataFrame | Series:
|
= help: Move into type-checking block
Unsafe fix
1 |+from typing import TYPE_CHECKING
2 |+
3 |+if TYPE_CHECKING:
4 |+ from pandas import DataFrame, Series
1 5 | def f():
2 6 | from pandas import DataFrame
3 7 |
--------------------------------------------------------------------------------
68 72 |
69 73 |
70 74 | def f():
71 |- from pandas import DataFrame, Series
72 75 |
73 |- def baz() -> DataFrame | Series:
76 |+ def baz() -> "DataFrame | Series":
74 77 | ...
75 78 |
76 79 |
quote.py:78:24: TCH002 [*] Move third-party import `pandas.DataFrame` into a type-checking block
|
77 | def f():
78 | from pandas import DataFrame, Series
| ^^^^^^^^^ TCH002
79 |
80 | def baz() -> (
|
= help: Move into type-checking block
Unsafe fix
1 |+from typing import TYPE_CHECKING
2 |+
3 |+if TYPE_CHECKING:
4 |+ from pandas import DataFrame, Series
1 5 | def f():
2 6 | from pandas import DataFrame
3 7 |
--------------------------------------------------------------------------------
75 79 |
76 80 |
77 81 | def f():
78 |- from pandas import DataFrame, Series
79 82 |
80 83 | def baz() -> (
81 |- DataFrame |
82 |- Series
84 |+ "DataFrame | Series"
83 85 | ):
84 86 | ...
85 87 |
86 88 | class C:
87 |- x: DataFrame[
88 |- int
89 |- ] = 1
89 |+ x: "DataFrame[int]" = 1
90 90 |
91 |- def func() -> DataFrame[[DataFrame[_P, _R]], DataFrame[_P, _R]]:
91 |+ def func() -> "DataFrame[[DataFrame[_P, _R]], DataFrame[_P, _R]]":
92 92 | ...
quote.py:78:35: TCH002 [*] Move third-party import `pandas.Series` into a type-checking block
|
77 | def f():
78 | from pandas import DataFrame, Series
| ^^^^^^ TCH002
79 |
80 | def baz() -> (
|
= help: Move into type-checking block
Unsafe fix
1 |+from typing import TYPE_CHECKING
2 |+
3 |+if TYPE_CHECKING:
4 |+ from pandas import DataFrame, Series
1 5 | def f():
2 6 | from pandas import DataFrame
3 7 |
--------------------------------------------------------------------------------
75 79 |
76 80 |
77 81 | def f():
78 |- from pandas import DataFrame, Series
79 82 |
80 83 | def baz() -> (
81 |- DataFrame |
82 |- Series
84 |+ "DataFrame | Series"
83 85 | ):
84 86 | ...
85 87 |
86 88 | class C:
87 |- x: DataFrame[
88 |- int
89 |- ] = 1
89 |+ x: "DataFrame[int]" = 1
90 90 |
91 |- def func() -> DataFrame[[DataFrame[_P, _R]], DataFrame[_P, _R]]:
91 |+ def func() -> "DataFrame[[DataFrame[_P, _R]], DataFrame[_P, _R]]":
92 92 | ...

View File

@@ -1,12 +0,0 @@
---
source: crates/ruff_linter/src/rules/flake8_type_checking/mod.rs
---
TCH006_1.py:18:30: TCH006 Invalid string member in `X | Y`-style union type
|
16 | type A = Value["int" | str] # OK
17 |
18 | OldS = TypeVar('OldS', int | 'str', str) # TCH006
| ^^^^^ TCH006
|

View File

@@ -1,41 +0,0 @@
---
source: crates/ruff_linter/src/rules/flake8_type_checking/mod.rs
---
TCH006_2.py:4:4: TCH006 Invalid string member in `X | Y`-style union type
|
4 | x: "int" | str # TCH006
| ^^^^^ TCH006
5 | x: ("int" | str) | "bool" # TCH006
|
TCH006_2.py:5:5: TCH006 Invalid string member in `X | Y`-style union type
|
4 | x: "int" | str # TCH006
5 | x: ("int" | str) | "bool" # TCH006
| ^^^^^ TCH006
|
TCH006_2.py:5:20: TCH006 Invalid string member in `X | Y`-style union type
|
4 | x: "int" | str # TCH006
5 | x: ("int" | str) | "bool" # TCH006
| ^^^^^^ TCH006
|
TCH006_2.py:12:20: TCH006 Invalid string member in `X | Y`-style union type
|
12 | z: list[str, str | "int"] = [] # TCH006
| ^^^^^ TCH006
13 |
14 | type A = Value["int" | str] # OK
|
TCH006_2.py:16:30: TCH006 Invalid string member in `X | Y`-style union type
|
14 | type A = Value["int" | str] # OK
15 |
16 | OldS = TypeVar('OldS', int | 'str', str) # TCH006
| ^^^^^ TCH006
|

View File

@@ -1,27 +0,0 @@
---
source: crates/ruff_linter/src/rules/flake8_type_checking/mod.rs
---
exempt_type_checking_1.py:5:20: TCH003 [*] Move standard library import `typing.Final` into a type-checking block
|
3 | from __future__ import annotations
4 |
5 | from typing import Final
| ^^^^^ TCH003
6 |
7 | Const: Final[dict] = {}
|
= help: Move into type-checking block
Unsafe fix
2 2 |
3 3 | from __future__ import annotations
4 4 |
5 |-from typing import Final
5 |+from typing import TYPE_CHECKING
6 |+
7 |+if TYPE_CHECKING:
8 |+ from typing import Final
6 9 |
7 10 | Const: Final[dict] = {}

View File

@@ -1,27 +0,0 @@
---
source: crates/ruff_linter/src/rules/flake8_type_checking/mod.rs
---
exempt_type_checking_2.py:5:20: TCH003 [*] Move standard library import `typing.Final` into a type-checking block
|
3 | from __future__ import annotations
4 |
5 | from typing import Final, TYPE_CHECKING
| ^^^^^ TCH003
6 |
7 | Const: Final[dict] = {}
|
= help: Move into type-checking block
Unsafe fix
2 2 |
3 3 | from __future__ import annotations
4 4 |
5 |-from typing import Final, TYPE_CHECKING
5 |+from typing import TYPE_CHECKING
6 |+
7 |+if TYPE_CHECKING:
8 |+ from typing import Final
6 9 |
7 10 | Const: Final[dict] = {}

View File

@@ -1,28 +0,0 @@
---
source: crates/ruff_linter/src/rules/flake8_type_checking/mod.rs
---
exempt_type_checking_3.py:5:20: TCH003 [*] Move standard library import `typing.Final` into a type-checking block
|
3 | from __future__ import annotations
4 |
5 | from typing import Final, Mapping
| ^^^^^ TCH003
6 |
7 | Const: Final[dict] = {}
|
= help: Move into type-checking block
Unsafe fix
2 2 |
3 3 | from __future__ import annotations
4 4 |
5 |-from typing import Final, Mapping
5 |+from typing import Mapping
6 |+from typing import TYPE_CHECKING
7 |+
8 |+if TYPE_CHECKING:
9 |+ from typing import Final
6 10 |
7 11 | Const: Final[dict] = {}

View File

@@ -35,8 +35,7 @@ mod tests {
#[test_case(Rule::LineTooLong, Path::new("E501_3.py"))]
#[test_case(Rule::MixedSpacesAndTabs, Path::new("E101.py"))]
#[test_case(Rule::ModuleImportNotAtTopOfFile, Path::new("E40.py"))]
#[test_case(Rule::ModuleImportNotAtTopOfFile, Path::new("E402_0.py"))]
#[test_case(Rule::ModuleImportNotAtTopOfFile, Path::new("E402_1.py"))]
#[test_case(Rule::ModuleImportNotAtTopOfFile, Path::new("E402.py"))]
#[test_case(Rule::ModuleImportNotAtTopOfFile, Path::new("E402.ipynb"))]
#[test_case(Rule::MultipleImportsOnOneLine, Path::new("E40.py"))]
#[test_case(Rule::MultipleStatementsOnOneLineColon, Path::new("E70.py"))]
@@ -66,7 +65,7 @@ mod tests {
}
#[test_case(Rule::IsLiteral, Path::new("constant_literals.py"))]
#[test_case(Rule::ModuleImportNotAtTopOfFile, Path::new("E402_0.py"))]
#[test_case(Rule::ModuleImportNotAtTopOfFile, Path::new("E402.py"))]
#[test_case(Rule::TypeComparison, Path::new("E721.py"))]
fn preview_rules(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!(

Some files were not shown because too many files have changed in this diff Show More